





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN" >
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>tvm.relay.nn &mdash; tvm 0.8.dev1982 文档</title>
  

  
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <link rel="stylesheet" href="../../../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/gallery.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/css/tlcpack_theme.css" type="text/css" />

  
  
    <link rel="shortcut icon" href="../../../../_static/tvm-logo-square.png"/>
  

  
  
  
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
        <script data-url_root="../../../../" id="documentation_options" src="../../../../_static/documentation_options.js"></script>
        <script src="../../../../_static/jquery.js"></script>
        <script src="../../../../_static/underscore.js"></script>
        <script src="../../../../_static/doctools.js"></script>
        <script src="../../../../_static/translations.js"></script>
        <script async="async" src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
    
    <script type="text/javascript" src="../../../../_static/js/theme.js"></script>

    
    <script type="text/javascript" src="../../../../_static/js/tlcpack_theme.js"></script>
    <link rel="index" title="索引" href="../../../../genindex.html" />
    <link rel="search" title="搜索" href="../../../../search.html" />
    <link rel="next" title="tvm.relay.vision" href="vision.html" />
    <link rel="prev" title="tvm.relay.frontend" href="frontend.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <a href="https://tvm.apache.org/"><img src=https://tvm.apache.org/assets/images/logo.svg alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href=https://tvm.apache.org/community>Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://tvm.apache.org/download>Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://tvm.apache.org/vta>VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://tvm.apache.org/blog>Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://tvm.apache.org/docs>Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://tvmconf.org>Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://github.com/apache/tvm/>Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href=https://tvmchinese.github.io/declaration_zh_CN.html>About-Translators</a>
                </li>
             </ul>
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href=https://apache.org/>Apache Homepage</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/licenses/>License</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/foundation/sponsorship.html>Sponsorship</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/security/>Security</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/foundation/thanks.html>Thanks</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/events/current-event>Events</a>
                     </li>
                     <li>
                       <a href=https://www.zhihu.com/column/c_1429578595417563136>Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href=https://apache.org/>Apache Homepage</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/licenses/>License</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/foundation/sponsorship.html>Sponsorship</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/security/>Security</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/foundation/thanks.html>Thanks</a>
                     </li>
                     <li>
                       <a href=https://www.apache.org/events/current-event>Events</a>
                     </li>
                     <li>
                       <a href=https://www.zhihu.com/column/c_1429578595417563136>Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../../../index.html">
          

          
            
            <img src="../../../../_static/tvm-logo-small.png" class="logo" alt="Logo"/>
          
          </a>

          
            
            
                <div class="version">
                  0.8.dev1982
                </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../install/index.html">安装 TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../contribute/index.html">贡献者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../tutorial/index.html">User Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../how_to/index.html">How To Guides</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../dev/tutorial/index.html">Developer Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../dev/how_to/how_to.html">开发者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../arch/index.html">Design and Architecture</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../topic/microtvm/index.html">microTVM：裸机使用TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../topic/vta/index.html">VTA: Versatile Tensor Accelerator</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../../langref/index.html">语言参考</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">Python API</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../runtime.html">tvm.runtime</a></li>
<li class="toctree-l2"><a class="reference internal" href="../ndarray.html">tvm.runtime.ndarray</a></li>
<li class="toctree-l2"><a class="reference internal" href="../error.html">tvm.error</a></li>
<li class="toctree-l2"><a class="reference internal" href="../ir.html">tvm.ir</a></li>
<li class="toctree-l2"><a class="reference internal" href="../ir.html#module-tvm.instrument">tvm.instrument</a></li>
<li class="toctree-l2"><a class="reference internal" href="../ir.html#module-tvm.transform">tvm.transform</a></li>
<li class="toctree-l2"><a class="reference internal" href="../target.html">tvm.target</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tir.html">tvm.tir</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tir.html#module-tvm.tir.transform">tvm.tir.transform</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tir.html#tvm-tir-analysis">tvm.tir.analysis</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tir.html#module-tvm.tir.stmt_functor">tvm.tir.stmt_functor</a></li>
<li class="toctree-l2"><a class="reference internal" href="../te.html">tvm.te</a></li>
<li class="toctree-l2"><a class="reference internal" href="../te.html#module-tvm.te.hybrid">tvm.te.hybrid</a></li>
<li class="toctree-l2"><a class="reference internal" href="../driver.html">tvm.driver</a></li>
<li class="toctree-l2"><a class="reference internal" href="index.html">tvm.relay</a></li>
<li class="toctree-l2"><a class="reference internal" href="frontend.html">tvm.relay.frontend</a></li>
<li class="toctree-l2 current"><a class="current reference internal" href="#">tvm.relay.nn</a></li>
<li class="toctree-l2"><a class="reference internal" href="vision.html">tvm.relay.vision</a></li>
<li class="toctree-l2"><a class="reference internal" href="image.html">tvm.relay.image</a></li>
<li class="toctree-l2"><a class="reference internal" href="transform.html">tvm.relay.transform</a></li>
<li class="toctree-l2"><a class="reference internal" href="analysis.html">tvm.relay.analysis</a></li>
<li class="toctree-l2"><a class="reference internal" href="backend.html">tvm.relay.backend</a></li>
<li class="toctree-l2"><a class="reference internal" href="dataflow_pattern.html">tvm.relay.dataflow_pattern</a></li>
<li class="toctree-l2"><a class="reference internal" href="testing.html">tvm.relay.testing</a></li>
<li class="toctree-l2"><a class="reference internal" href="../autotvm.html">tvm.autotvm</a></li>
<li class="toctree-l2"><a class="reference internal" href="../auto_scheduler.html">tvm.auto_scheduler</a></li>
<li class="toctree-l2"><a class="reference internal" href="../rpc.html">tvm.rpc</a></li>
<li class="toctree-l2"><a class="reference internal" href="../micro.html">tvm.micro</a></li>
<li class="toctree-l2"><a class="reference internal" href="../contrib.html">tvm.contrib</a></li>
<li class="toctree-l2"><a class="reference internal" href="../graph_executor.html">tvm.contrib.graph_executor</a></li>
<li class="toctree-l2"><a class="reference internal" href="../topi.html">tvm.topi</a></li>
<li class="toctree-l2"><a class="reference internal" href="../vta/index.html">vta</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../links.html">Other APIs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../genindex.html">索引</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              <!-- tvm -->
              Table of content
            </div>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../../../index.html">Docs</a> <span class="br-arrow">></span></li>
        
          <li><a href="../index.html">Python API</a> <span class="br-arrow">></span></li>
        
      <li>tvm.relay.nn</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
            <a href="../../../../_sources/reference/api/python/relay/nn.rst.txt" rel="nofollow"> <img src="../../../../_static//img/source.svg" alt="viewsource"/></a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="module-tvm.relay.nn">
<span id="tvm-relay-nn"></span><h1>tvm.relay.nn<a class="headerlink" href="#module-tvm.relay.nn" title="永久链接至标题">¶</a></h1>
<p>Neural network related operators.</p>
<p><strong>类：</strong></p>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 90%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.Constant" title="tvm.relay.nn.Constant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Constant</span></code></a>(data)</p></td>
<td><p>A constant expression in Relay.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.Expr" title="tvm.relay.nn.Expr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Expr</span></code></a></p></td>
<td><p>alias of <a class="reference internal" href="../ir.html#tvm.ir.RelayExpr" title="tvm.ir.expr.RelayExpr"><code class="xref py py-class docutils literal notranslate"><span class="pre">tvm.ir.expr.RelayExpr</span></code></a></p></td>
</tr>
</tbody>
</table>
<p><strong>函数：</strong></p>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 90%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.adaptive_avg_pool1d" title="tvm.relay.nn.adaptive_avg_pool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">adaptive_avg_pool1d</span></code></a>(data[, output_size, layout])</p></td>
<td><p>1D adaptive average pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.adaptive_avg_pool2d" title="tvm.relay.nn.adaptive_avg_pool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">adaptive_avg_pool2d</span></code></a>(data[, output_size, layout])</p></td>
<td><p>2D adaptive average pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.adaptive_avg_pool3d" title="tvm.relay.nn.adaptive_avg_pool3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">adaptive_avg_pool3d</span></code></a>(data[, output_size, layout])</p></td>
<td><p>3D adaptive avg pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.adaptive_max_pool1d" title="tvm.relay.nn.adaptive_max_pool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">adaptive_max_pool1d</span></code></a>(data[, output_size, layout])</p></td>
<td><p>1D adaptive max pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.adaptive_max_pool2d" title="tvm.relay.nn.adaptive_max_pool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">adaptive_max_pool2d</span></code></a>(data[, output_size, layout])</p></td>
<td><p>2D adaptive max pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.adaptive_max_pool3d" title="tvm.relay.nn.adaptive_max_pool3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">adaptive_max_pool3d</span></code></a>(data[, output_size, layout])</p></td>
<td><p>3D adaptive max pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.avg_pool1d" title="tvm.relay.nn.avg_pool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">avg_pool1d</span></code></a>(data[, pool_size, strides, …])</p></td>
<td><p>1D average pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.avg_pool2d" title="tvm.relay.nn.avg_pool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">avg_pool2d</span></code></a>(data[, pool_size, strides, …])</p></td>
<td><p>2D 平均池化算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.avg_pool2d_grad" title="tvm.relay.nn.avg_pool2d_grad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">avg_pool2d_grad</span></code></a>(out_grad, data[, pool_size, …])</p></td>
<td><p>Gradient of 2D average pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.avg_pool3d" title="tvm.relay.nn.avg_pool3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">avg_pool3d</span></code></a>(data[, pool_size, strides, …])</p></td>
<td><p>3D 平均池化算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.batch_flatten" title="tvm.relay.nn.batch_flatten"><code class="xref py py-obj docutils literal notranslate"><span class="pre">batch_flatten</span></code></a>(data)</p></td>
<td><p>BatchFlatten.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.batch_matmul" title="tvm.relay.nn.batch_matmul"><code class="xref py py-obj docutils literal notranslate"><span class="pre">batch_matmul</span></code></a>(tensor_a, tensor_b[, …])</p></td>
<td><p>Compute batch matrix multiplication of <cite>tensor_a</cite> and <cite>tensor_b</cite>.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.batch_norm" title="tvm.relay.nn.batch_norm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">batch_norm</span></code></a>(data, gamma, beta, moving_mean, …)</p></td>
<td><p>批量处理规范化层 （Ioffe and Szegedy, 2014）。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.batch_to_space_nd" title="tvm.relay.nn.batch_to_space_nd"><code class="xref py py-obj docutils literal notranslate"><span class="pre">batch_to_space_nd</span></code></a>(data, block_shape, crops)</p></td>
<td><p>Reshape the batch dimension into spatial dimensions.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.bias_add" title="tvm.relay.nn.bias_add"><code class="xref py py-obj docutils literal notranslate"><span class="pre">bias_add</span></code></a>(data, bias[, axis])</p></td>
<td><p>add_bias 算子。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.bitpack" title="tvm.relay.nn.bitpack"><code class="xref py py-obj docutils literal notranslate"><span class="pre">bitpack</span></code></a>(data[, bits, pack_axis, bit_axis, …])</p></td>
<td><p>对于位序列算子的张量包装。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.bitserial_conv2d" title="tvm.relay.nn.bitserial_conv2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">bitserial_conv2d</span></code></a>(data, weight[, strides, …])</p></td>
<td><p>使用位序列计算的 2D 卷积。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.bitserial_dense" title="tvm.relay.nn.bitserial_dense"><code class="xref py py-obj docutils literal notranslate"><span class="pre">bitserial_dense</span></code></a>(data, weight[, units, …])</p></td>
<td><p>位序列稠密算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.const" title="tvm.relay.nn.const"><code class="xref py py-obj docutils literal notranslate"><span class="pre">const</span></code></a>(value[, dtype])</p></td>
<td><p>Create a constant value.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv2d_gemm_weight_transform" title="tvm.relay.nn.contrib_conv2d_gemm_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv2d_gemm_weight_transform</span></code></a>(…)</p></td>
<td><p>Weight Transformation part for 2D convolution with gemm algorithm.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv2d_gemm_without_weight_transform" title="tvm.relay.nn.contrib_conv2d_gemm_without_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv2d_gemm_without_weight_transform</span></code></a>(…)</p></td>
<td><p>2D convolution with gemm algorithm.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv2d_nchwc" title="tvm.relay.nn.contrib_conv2d_nchwc"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv2d_nchwc</span></code></a>(data, kernel[, …])</p></td>
<td><p>Variant of 2D convolution.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv2d_winograd_nnpack_weight_transform" title="tvm.relay.nn.contrib_conv2d_winograd_nnpack_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv2d_winograd_nnpack_weight_transform</span></code></a>(…)</p></td>
<td><p>基于 Winograd 算法对 2D 卷积的转换部分进行加权。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv2d_winograd_weight_transform" title="tvm.relay.nn.contrib_conv2d_winograd_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv2d_winograd_weight_transform</span></code></a>(…)</p></td>
<td><p>基于 Winograd 算法对 2D 卷积的转换部分进行加权。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv2d_winograd_without_weight_transform" title="tvm.relay.nn.contrib_conv2d_winograd_without_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv2d_winograd_without_weight_transform</span></code></a>(…)</p></td>
<td><p>基于 Winograd 算法的 2D 卷积。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv3d_winograd_weight_transform" title="tvm.relay.nn.contrib_conv3d_winograd_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv3d_winograd_weight_transform</span></code></a>(…)</p></td>
<td><p>基于 Winograd 算法对 3D 卷积的转换部分进行加权。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_conv3d_winograd_without_weight_transform" title="tvm.relay.nn.contrib_conv3d_winograd_without_weight_transform"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_conv3d_winograd_without_weight_transform</span></code></a>(…)</p></td>
<td><p>基于 Winograd 算法的 3D 卷积。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_dense_pack" title="tvm.relay.nn.contrib_dense_pack"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_dense_pack</span></code></a>(data, weight[, …])</p></td>
<td><p>稠密算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.contrib_depthwise_conv2d_nchwc" title="tvm.relay.nn.contrib_depthwise_conv2d_nchwc"><code class="xref py py-obj docutils literal notranslate"><span class="pre">contrib_depthwise_conv2d_nchwc</span></code></a>(data, kernel)</p></td>
<td><p>Variant of 2D depthwise convolution.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.conv1d" title="tvm.relay.nn.conv1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">conv1d</span></code></a>(data, weight[, strides, padding, …])</p></td>
<td><p>1D convolution.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.conv1d_transpose" title="tvm.relay.nn.conv1d_transpose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">conv1d_transpose</span></code></a>(data, weight[, strides, …])</p></td>
<td><p>One dimensional transposed convolution operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.conv2d" title="tvm.relay.nn.conv2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">conv2d</span></code></a>(data, weight[, strides, padding, …])</p></td>
<td><p>2D 卷积。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.conv2d_transpose" title="tvm.relay.nn.conv2d_transpose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">conv2d_transpose</span></code></a>(data, weight[, strides, …])</p></td>
<td><p>二维转置卷积算子。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.conv3d" title="tvm.relay.nn.conv3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">conv3d</span></code></a>(data, weight[, strides, padding, …])</p></td>
<td><p>3D 卷积。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.conv3d_transpose" title="tvm.relay.nn.conv3d_transpose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">conv3d_transpose</span></code></a>(data, weight[, strides, …])</p></td>
<td><p>3D 转置卷积。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.correlation" title="tvm.relay.nn.correlation"><code class="xref py py-obj docutils literal notranslate"><span class="pre">correlation</span></code></a>(data1, data2, kernel_size, …)</p></td>
<td><p>Applies correlation to inputs.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.cross_entropy" title="tvm.relay.nn.cross_entropy"><code class="xref py py-obj docutils literal notranslate"><span class="pre">cross_entropy</span></code></a>(predictions, targets)</p></td>
<td><p>CrossEntropy without logits.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.cross_entropy_with_logits" title="tvm.relay.nn.cross_entropy_with_logits"><code class="xref py py-obj docutils literal notranslate"><span class="pre">cross_entropy_with_logits</span></code></a>(predictions, targets)</p></td>
<td><p>CrossEntropy with logits.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.deformable_conv2d" title="tvm.relay.nn.deformable_conv2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">deformable_conv2d</span></code></a>(data, offset, weight[, …])</p></td>
<td><p>Deformable 2d convolution.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.dense" title="tvm.relay.nn.dense"><code class="xref py py-obj docutils literal notranslate"><span class="pre">dense</span></code></a>(data, weight[, units, out_dtype])</p></td>
<td><p>稠密算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.depth_to_space" title="tvm.relay.nn.depth_to_space"><code class="xref py py-obj docutils literal notranslate"><span class="pre">depth_to_space</span></code></a>(data, block_size[, layout, mode])</p></td>
<td><p>Convert channels into spatial blocks.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.dilate" title="tvm.relay.nn.dilate"><code class="xref py py-obj docutils literal notranslate"><span class="pre">dilate</span></code></a>(data, strides[, dilation_value])</p></td>
<td><p>Dilate data with given dilation value (0 by default).</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.dropout" title="tvm.relay.nn.dropout"><code class="xref py py-obj docutils literal notranslate"><span class="pre">dropout</span></code></a>(data[, rate])</p></td>
<td><p>对输入数列使用 dropout 算子。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.dropout_raw" title="tvm.relay.nn.dropout_raw"><code class="xref py py-obj docutils literal notranslate"><span class="pre">dropout_raw</span></code></a>(data[, rate])</p></td>
<td><p>对输入数列使用 dropout 算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.fast_softmax" title="tvm.relay.nn.fast_softmax"><code class="xref py py-obj docutils literal notranslate"><span class="pre">fast_softmax</span></code></a>(data[, axis])</p></td>
<td><p>计算 softmax。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.fifo_buffer" title="tvm.relay.nn.fifo_buffer"><code class="xref py py-obj docutils literal notranslate"><span class="pre">fifo_buffer</span></code></a>(data, buffer, axis)</p></td>
<td><p>FIFO buffer to enable computation reuse in CNNs with sliding indow input</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.get_pad_tuple1d" title="tvm.relay.nn.get_pad_tuple1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">get_pad_tuple1d</span></code></a>(padding)</p></td>
<td><p>Common code to get the 1 dimensional pad option :param padding: Padding size :type padding: Union[int, Tuple[int, …]]</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.get_pad_tuple2d" title="tvm.relay.nn.get_pad_tuple2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">get_pad_tuple2d</span></code></a>(padding)</p></td>
<td><p>Common code to get the pad option :param padding: Padding size :type padding: Union[int, Tuple[int, …]]</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.get_pad_tuple3d" title="tvm.relay.nn.get_pad_tuple3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">get_pad_tuple3d</span></code></a>(padding)</p></td>
<td><p>Common code to get the pad option :param padding: Padding size :type padding: Union[int, Tuple[int, …]]</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.global_avg_pool1d" title="tvm.relay.nn.global_avg_pool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">global_avg_pool1d</span></code></a>(data[, layout])</p></td>
<td><p>1D global average pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.global_avg_pool2d" title="tvm.relay.nn.global_avg_pool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">global_avg_pool2d</span></code></a>(data[, layout])</p></td>
<td><p>2D 全局平均池化算子。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.global_avg_pool3d" title="tvm.relay.nn.global_avg_pool3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">global_avg_pool3d</span></code></a>(data[, layout])</p></td>
<td><p>3D global average pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.global_max_pool1d" title="tvm.relay.nn.global_max_pool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">global_max_pool1d</span></code></a>(data[, layout])</p></td>
<td><p>1D global maximum pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.global_max_pool2d" title="tvm.relay.nn.global_max_pool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">global_max_pool2d</span></code></a>(data[, layout])</p></td>
<td><p>2D 全局最大池化算子。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.global_max_pool3d" title="tvm.relay.nn.global_max_pool3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">global_max_pool3d</span></code></a>(data[, layout])</p></td>
<td><p>3D global maximum pooling operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.group_norm" title="tvm.relay.nn.group_norm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">group_norm</span></code></a>(data, gamma, beta, num_groups[, …])</p></td>
<td><p>Group normalization normalizes over group of channels for each training examples.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.instance_norm" title="tvm.relay.nn.instance_norm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">instance_norm</span></code></a>(data, gamma, beta[, axis, …])</p></td>
<td><p>Instance Normalization (Ulyanov and et al., 2016) Applies instance normalization to the n-dimensional input array.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.l2_normalize" title="tvm.relay.nn.l2_normalize"><code class="xref py py-obj docutils literal notranslate"><span class="pre">l2_normalize</span></code></a>(data, eps[, axis])</p></td>
<td><p>对输入数据执行 L2 标准化。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.layer_norm" title="tvm.relay.nn.layer_norm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">layer_norm</span></code></a>(data, gamma, beta[, axis, …])</p></td>
<td><p>Layer normalization (Lei Ba and et al., 2016).</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.leaky_relu" title="tvm.relay.nn.leaky_relu"><code class="xref py py-obj docutils literal notranslate"><span class="pre">leaky_relu</span></code></a>(data[, alpha])</p></td>
<td><p>该算子将数据作为输入，并代入带泄露的修正线性单元。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.log_softmax" title="tvm.relay.nn.log_softmax"><code class="xref py py-obj docutils literal notranslate"><span class="pre">log_softmax</span></code></a>(data[, axis])</p></td>
<td><p>计算 log_softmax。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.lrn" title="tvm.relay.nn.lrn"><code class="xref py py-obj docutils literal notranslate"><span class="pre">lrn</span></code></a>(data[, size, axis, bias, alpha, beta])</p></td>
<td><p>This operator takes data as input and does local response normalization.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.matmul" title="tvm.relay.nn.matmul"><code class="xref py py-obj docutils literal notranslate"><span class="pre">matmul</span></code></a>(tensor_a, tensor_b[, units, …])</p></td>
<td><p>Matmul operator.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.max_pool1d" title="tvm.relay.nn.max_pool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">max_pool1d</span></code></a>(data[, pool_size, strides, …])</p></td>
<td><p>1D maximum pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.max_pool2d" title="tvm.relay.nn.max_pool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">max_pool2d</span></code></a>(data[, pool_size, strides, …])</p></td>
<td><p>2D 最大池化算子。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.max_pool2d_grad" title="tvm.relay.nn.max_pool2d_grad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">max_pool2d_grad</span></code></a>(out_grad, data[, pool_size, …])</p></td>
<td><p>Gradient of 2D maximum pooling operator.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.max_pool3d" title="tvm.relay.nn.max_pool3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">max_pool3d</span></code></a>(data[, pool_size, strides, …])</p></td>
<td><p>3D 最大池化算子。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.mirror_pad" title="tvm.relay.nn.mirror_pad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mirror_pad</span></code></a>(data, pad_width[, mode])</p></td>
<td><p>MirrorPadding</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.nll_loss" title="tvm.relay.nn.nll_loss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">nll_loss</span></code></a>(predictions, targets, weights[, …])</p></td>
<td><p>Negative log likelihood loss.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.pad" title="tvm.relay.nn.pad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">pad</span></code></a>(data, pad_width[, pad_value, pad_mode])</p></td>
<td><p>填充</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.prelu" title="tvm.relay.nn.prelu"><code class="xref py py-obj docutils literal notranslate"><span class="pre">prelu</span></code></a>(data, alpha[, axis])</p></td>
<td><p>该算子将数据作为输入，并代入带泄露的修正线性单元。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.relu" title="tvm.relay.nn.relu"><code class="xref py py-obj docutils literal notranslate"><span class="pre">relu</span></code></a>(data)</p></td>
<td><p>修正线性单元。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.softmax" title="tvm.relay.nn.softmax"><code class="xref py py-obj docutils literal notranslate"><span class="pre">softmax</span></code></a>(data[, axis])</p></td>
<td><p>计算 softmax。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.space_to_batch_nd" title="tvm.relay.nn.space_to_batch_nd"><code class="xref py py-obj docutils literal notranslate"><span class="pre">space_to_batch_nd</span></code></a>(data, block_shape, paddings)</p></td>
<td><p>Divide spatial dimensions of the data into a grid of blocks and interleave them into batch dim.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.space_to_depth" title="tvm.relay.nn.space_to_depth"><code class="xref py py-obj docutils literal notranslate"><span class="pre">space_to_depth</span></code></a>(data, block_size[, layout])</p></td>
<td><p>Convert spatial blocks into channels.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.sparse_add" title="tvm.relay.nn.sparse_add"><code class="xref py py-obj docutils literal notranslate"><span class="pre">sparse_add</span></code></a>(dense_mat, sparse_mat)</p></td>
<td><p>Computes the matrix addition of <cite>dense_mat</cite> and <cite>sparse_mat</cite>, where <cite>dense_mat</cite> is a dense matrix and <cite>sparse_mat</cite> is a sparse (CSR) namedtuple with fields <cite>data</cite>, <cite>indices</cite>, and <cite>indptr</cite>.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.sparse_dense" title="tvm.relay.nn.sparse_dense"><code class="xref py py-obj docutils literal notranslate"><span class="pre">sparse_dense</span></code></a>(dense_mat, sparse_mat[, sparse_lhs])</p></td>
<td><p>Computes the matrix multiplication of <cite>dense_mat</cite> and <cite>sparse_mat</cite>, where <cite>dense_mat</cite> is a dense matrix and <cite>sparse_mat</cite> is a sparse (either BSR or CSR) namedtuple with fields <cite>data</cite>, <cite>indices</cite>, and <cite>indptr</cite>.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.sparse_transpose" title="tvm.relay.nn.sparse_transpose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">sparse_transpose</span></code></a>(x)</p></td>
<td><p>Computes the fast matrix transpose of x, where x is a sparse tensor in CSR format (represented as a namedtuple with fields <cite>data</cite>, <cite>indices</cite>, and <cite>indptr</cite>).</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="#tvm.relay.nn.upsampling" title="tvm.relay.nn.upsampling"><code class="xref py py-obj docutils literal notranslate"><span class="pre">upsampling</span></code></a>(data[, scale_h, scale_w, layout, …])</p></td>
<td><p>上采样。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="#tvm.relay.nn.upsampling3d" title="tvm.relay.nn.upsampling3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">upsampling3d</span></code></a>(data[, scale_d, scale_h, …])</p></td>
<td><p>3D 上采样。</p></td>
</tr>
</tbody>
</table>
<dl class="py class">
<dt class="sig sig-object py" id="tvm.relay.nn.Constant">
<em class="property"><span class="pre">class</span> </em><span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">Constant</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.Constant" title="永久链接至目标">¶</a></dt>
<dd><p>A constant expression in Relay.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><p><strong>data</strong> (<a class="reference internal" href="../ndarray.html#tvm.nd.NDArray" title="tvm.nd.NDArray"><em>tvm.nd.NDArray</em></a>) – The data content of the constant expression.</p>
</dd>
</dl>
</dd></dl>

<dl class="py attribute">
<dt class="sig sig-object py" id="tvm.relay.nn.Expr">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">Expr</span></span><a class="headerlink" href="#tvm.relay.nn.Expr" title="永久链接至目标">¶</a></dt>
<dd><p>alias of <a class="reference internal" href="../ir.html#tvm.ir.RelayExpr" title="tvm.ir.expr.RelayExpr"><code class="xref py py-class docutils literal notranslate"><span class="pre">tvm.ir.expr.RelayExpr</span></code></a>
<strong>Attributes:</strong></p>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 90%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p><code class="xref py py-obj docutils literal notranslate"><span class="pre">checked_type</span></code></p></td>
<td><p>Get the checked type of tvm.relay.Expr.</p></td>
</tr>
</tbody>
</table>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.adaptive_avg_pool1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">adaptive_avg_pool1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.adaptive_avg_pool1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D adaptive average pooling operator. This operator is experimental.</p>
<p>This operator takes data as input and does 1D average value calculation
across each window represented by W.</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, width)</cite>,
to produce an output Tensor with shape
(batch_size, in_channels, output_width).</p>
<p>The pooling kernel and stride sizes are automatically chosen for
desired output sizes.</p>
<dl>
<dt>For output_size:</dt><dd><p>If this argument is not provided, input width will be used
as output width.</p>
<p>If a single integer is provided for output_size, the output size is
(N x C x output_size) for any input (NCW).</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>output_size</strong> (<em>tuple of int, optional</em>) – Output width.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.adaptive_avg_pool2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">adaptive_avg_pool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.adaptive_avg_pool2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D adaptive average pooling operator. This operator is experimental.</p>
<p>This operator takes data as input and does 2D average value calculation
across each window represented by WxH.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
to produce an output Tensor with shape
(batch_size, in_channels, output_height, output_width).</p>
<p>The pooling kernel and stride sizes are automatically chosen for
desired output sizes.</p>
<dl>
<dt>For output_size:</dt><dd><p>If this argument is not provided, input height and width will be used
as output height and width.</p>
<p>If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size) for any input (NCHW).</p>
<p>If a tuple of integers (height, width) are provided for output_size,
the output size is (N x C x height x width) for any input (NCHW).</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>output_size</strong> (<em>tuple of int. optional</em>) – Output height and width.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.adaptive_avg_pool3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">adaptive_avg_pool3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.adaptive_avg_pool3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D adaptive avg pooling operator. This operator is experimental.</p>
<p>This operator takes data as input and does 3D avg value calculation
across each window represented by DxWxH.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, depth, height, width)</cite>,
to produce an output Tensor with shape
(batch_size, in_channels, output_depth, output_height, output_width).</p>
<p>The pooling kernel and stride sizes are automatically chosen for
desired output sizes.</p>
<dl>
<dt>For output_size:</dt><dd><p>If this argument is not provided, input depth, height and width will be used
as output depth, height and width.</p>
<p>If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size x output_size) for any input (NCDHW).</p>
<p>If a tuple of integers (depth, height, width) are provided for output_size,
the output size is (N x C x depth x height x width) for any input (NCDHW).</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>output_size</strong> (<em>tuple of int, optional</em>) – Output depth, height and width.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.adaptive_max_pool1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">adaptive_max_pool1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.adaptive_max_pool1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D adaptive max pooling operator. This operator is experimental.</p>
<p>This operator takes data as input and does 1D max value calculation
across each window represented by W.</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, width)</cite>,
to produce an output Tensor with shape
(batch_size, in_channels, output_width).</p>
<p>The pooling kernel and stride sizes are automatically chosen for
desired output sizes.</p>
<dl>
<dt>For output_size:</dt><dd><p>If this argument is not provided, input width will be used
as output width.</p>
<p>If a single integer is provided for output_size, the output size is
(N x C x output_size) for any input (NCW).</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>output_size</strong> (<em>tuple of int, optional</em>) – Output width.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.adaptive_max_pool2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">adaptive_max_pool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.adaptive_max_pool2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D adaptive max pooling operator. This operator is experimental.</p>
<p>This operator takes data as input and does 2D max value calculation
across each window represented by WxH.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
to produce an output Tensor with shape
(batch_size, in_channels, output_height, output_width).</p>
<p>The pooling kernel and stride sizes are automatically chosen for
desired output sizes.</p>
<dl>
<dt>For output_size:</dt><dd><p>If this argument is not provided, input height and width will be used
as output height and width.</p>
<p>If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size) for any input (NCHW).</p>
<p>If a tuple of integers (height, width) are provided for output_size,
the output size is (N x C x height x width) for any input (NCHW).</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>output_size</strong> (<em>tuple of int. optional</em>) – Output height and width.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.adaptive_max_pool3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">adaptive_max_pool3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.adaptive_max_pool3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D adaptive max pooling operator. This operator is experimental.</p>
<p>This operator takes data as input and does 3D max value calculation
across each window represented by DxWxH.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, depth, height, width)</cite>,
to produce an output Tensor with shape
(batch_size, in_channels, output_depth, output_height, output_width).</p>
<p>The pooling kernel and stride sizes are automatically chosen for
desired output sizes.</p>
<dl>
<dt>For output_size:</dt><dd><p>If this argument is not provided, input depth, height and width will be used
as output depth, height and width.</p>
<p>If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size x output_size) for any input (NCDHW).</p>
<p>If a tuple of integers (depth, height, width) are provided for output_size,
the output size is (N x C x depth x height x width) for any input (NCDHW).</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>output_size</strong> (<em>tuple of int, optional</em>) – Output depth, height and width.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.avg_pool1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">avg_pool1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">count_include_pad</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.avg_pool1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D average pooling operator.</p>
<p>This operator takes data as input and does 1D average value calculation
with in pool_size sized window by striding defined by stride</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
a data Tensor with shape <cite>(batch_size, channels, width)</cite>,
to produce an output Tensor.</p>
<p>The ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>dilation</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The dilation of pooling.</p></li>
<li><p><strong>padding</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
<li><p><strong>count_include_pad</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To include padding to compute the average.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.avg_pool2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">avg_pool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">count_include_pad</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.avg_pool2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D 平均池化算子。</p>
<p>This operator takes data as input and does 2D average value calculation
within a pool_size sized window, with striding defined by stride.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, h, w), pool_size (kh, kw)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, y, x)  = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
     \mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)\]</div>
<p>Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>dilation</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The dilation of pooling.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
<li><p><strong>count_include_pad</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To include padding to compute the average.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.avg_pool2d_grad">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">avg_pool2d_grad</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">out_grad</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">count_include_pad</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.avg_pool2d_grad" title="永久链接至目标">¶</a></dt>
<dd><p>Gradient of 2D average pooling operator.</p>
<p>This operator takes out_grad and data as input and calculates gradient of avg_pool2d.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>out_grad</strong> (<em>tvm.relay.Expr</em>) – The output gradient</p></li>
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
<li><p><strong>count_include_pad</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To include padding to compute the average.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.avg_pool3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">avg_pool3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">count_include_pad</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.avg_pool3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D 平均池化算子。</p>
<p>This operator takes data as input and does 3D average value calculation
within a pool_size sized window, with striding defined by stride.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
a data Tensor with shape <cite>(batch_size, channels, depth, height, width)</cite>,
to produce an output Tensor.</p>
<p>The ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>dilation</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The dilation of pooling.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
<li><p><strong>count_include_pad</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To include padding to compute the average.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.batch_flatten">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">batch_flatten</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.batch_flatten" title="永久链接至目标">¶</a></dt>
<dd><p>BatchFlatten.</p>
<p>This operator flattens all the dimensions except for the batch dimension,
which results in a 2D output.</p>
<p>For data with shape <code class="docutils literal notranslate"><span class="pre">(d1,</span> <span class="pre">d2,</span> <span class="pre">...,</span> <span class="pre">dk)</span></code>
batch_flatten(data) returns reshaped output of shape <code class="docutils literal notranslate"><span class="pre">(d1,</span> <span class="pre">d2*...*dk)</span></code>.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The Flattened result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.batch_matmul">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">batch_matmul</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">tensor_a</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tensor_b</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">transpose_a</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">transpose_b</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.batch_matmul" title="永久链接至目标">¶</a></dt>
<dd><p>Compute batch matrix multiplication of <cite>tensor_a</cite> and <cite>tensor_b</cite>.</p>
<p>Both <cite>tensor_a</cite> and <cite>tensor_b</cite> can be transposed. For legacy reasons, we use NT format
(transpose_a=False, transpose_b=True) by default.</p>
<div class="math notranslate nohighlight">
\[\mbox{batch_matmul}(A, B)[i, :, :] = \mbox{matmul}(A[i, :, :], B[i, :, :])\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>tensor_a</strong> (<em>tvm.relay.Expr</em>) – The first input.</p></li>
<li><p><strong>tensor_b</strong> (<em>tvm.relay.Expr</em>) – The second input.</p></li>
<li><p><strong>out_dtype</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Specifies the output data type for mixed precision batch matmul.</p></li>
<li><p><strong>transpose_a</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>] </em><em>= False</em>) – Whether the first tensor is in transposed format.</p></li>
<li><p><strong>transpose_b</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>] </em><em>= True</em>) – Whether the second tensor is in transposed format.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.batch_norm">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">batch_norm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gamma</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">beta</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">moving_mean</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">moving_var</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">epsilon</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">center</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.batch_norm" title="永久链接至目标">¶</a></dt>
<dd><p>Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.</p>
<div class="math notranslate nohighlight">
\[\begin{split}data\_mean[i] = mean(data[:,i,:,...]) \\
data\_var[i] = var(data[:,i,:,...])\end{split}\]</div>
<p>Then compute the normalized output, which has the same shape as input, as following:</p>
<div class="math notranslate nohighlight">
\[out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}}
    * gamma[i] + beta[i]\]</div>
<p>Both <em>mean</em> and <em>var</em> returns a scalar by treating the input as a vector.</p>
<p>Assume the input has size <em>k</em> on axis 1, then both <code class="docutils literal notranslate"><span class="pre">gamma</span></code> and <code class="docutils literal notranslate"><span class="pre">beta</span></code>
have shape <em>(k,)</em>.</p>
<p>Besides the inputs and the outputs, this operator accepts two auxiliary
states, <code class="docutils literal notranslate"><span class="pre">moving_mean</span></code> and <code class="docutils literal notranslate"><span class="pre">moving_var</span></code>, which are <em>k</em>-length
vectors. They are global statistics for the whole dataset, which are updated by</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">moving_mean</span> <span class="o">=</span> <span class="n">moving_mean</span> <span class="o">*</span> <span class="n">momentum</span> <span class="o">+</span> <span class="n">data_mean</span> <span class="o">*</span> <span class="p">(</span><span class="mi">1</span> <span class="o">-</span> <span class="n">momentum</span><span class="p">)</span>
<span class="n">moving_var</span> <span class="o">=</span> <span class="n">moving_var</span> <span class="o">*</span> <span class="n">momentum</span> <span class="o">+</span> <span class="n">data_var</span> <span class="o">*</span> <span class="p">(</span><span class="mi">1</span> <span class="o">-</span> <span class="n">momentum</span><span class="p">)</span>
</pre></div>
</div>
<p>The parameter <code class="docutils literal notranslate"><span class="pre">axis</span></code> specifies which axis of the input shape denotes
the ‘channel’ (separately normalized groups).  The default is 1.
Specifying -1 sets the channel axis to be the last item in the input shape.</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – Input to which batch_norm will be applied.</p></li>
<li><p><strong>gamma</strong> (<em>tvm.relay.Expr</em>) – The gamma scale factor.</p></li>
<li><p><strong>beta</strong> (<em>tvm.relay.Expr</em>) – The beta offset factor.</p></li>
<li><p><strong>moving_mean</strong> (<em>tvm.relay.Expr</em>) – Running mean of input.</p></li>
<li><p><strong>moving_var</strong> (<em>tvm.relay.Expr</em>) – Running variance of input.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em><em>, </em><em>default=1</em>) – Specify along which shape axis the channel is specified.</p></li>
<li><p><strong>epsilon</strong> (<em>double</em><em>, </em><em>optional</em><em>, </em><em>default=1e-5</em>) – Small float added to variance to avoid dividing by zero.</p></li>
<li><p><strong>center</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, add offset of beta to the normalized tensor. If False,
beta is ignored.</p></li>
<li><p><strong>scale</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If true, multiply by gamma. If False, gamma is not used.
When the next layer is piecewise linear (also e.g. nn.relu),
this can be disabled since the scaling will be done by the next layer.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – Tuple of normed data (same shape as input),
new running mean (k-length vector),
and new running variance (k-length vector)</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>relay.Tuple([tvm.relay.Expr, tvm.relay.Expr, tvm.relay.Expr])</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.batch_to_space_nd">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">batch_to_space_nd</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">block_shape</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">crops</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.batch_to_space_nd" title="永久链接至目标">¶</a></dt>
<dd><p>Reshape the batch dimension into spatial dimensions.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<a class="reference internal" href="../te.html#tvm.te.Tensor" title="tvm.te.Tensor"><em>tvm.te.Tensor</em></a>) – N-D with shape [batch, spatial_shape, remaining_shape]</p></li>
<li><p><strong>block_shape</strong> (<em>relay.Expr</em>) – 1-D of size [M] where M is number of spatial dims, specifies block size
for each spatial dimension.</p></li>
<li><p><strong>crops</strong> (<em>relay.Expr</em>) – 2-D of shape [M, 2] where M is number of spatial dims, specifies
[begin, end] crop size for each spatial dimension.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – N-D Tensor with shape
[batch / prod(block_shape),
in_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], …,
in_shape[M] * block_shape[M-1] - crops[M-1, 0] - crops[M-1, 1],
remaining_shape]</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.bias_add">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">bias_add</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.bias_add" title="永久链接至目标">¶</a></dt>
<dd><p>add_bias 算子。</p>
<p>Add 1D bias to the axis of data.
This function is a special case of add which allows
inference of shape of the bias from data.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>bias</strong> (<em>tvm.relay.Expr</em>) – The bias to be added.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – The axis to add the bias.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The final result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.bitpack">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">bitpack</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pack_axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bit_axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">2</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pack_type</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'uint32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">name</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'BitPack'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.bitpack" title="永久链接至目标">¶</a></dt>
<dd><p>对于位序列算子的张量包装。</p>
<p>The values along the input tensor’s pack_axis are quantized
and packed together into the specified pack_type in a new bit axis.</p>
<p>For example, consider bitpacking with data to be a tensor with shape <cite>[1, 64, 128, 128]</cite>,
pack_axis=1, bit_axis=4, pack_type=uint8, and bits=2. The output in this case will
be of shape <cite>[1, 8, 128, 128, 2]</cite>. The dimension of axis 1 has been reduced by a factor
of 8 since each value is packed into an 8-bit uint8. Axis 4 is now two bitplanes
representing the quantized value of the incoming data. The output tensor is now
ready to be used in a bitserial operation.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.expr</em>) – The incoming tensor to be packed.</p></li>
<li><p><strong>bits</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Number of bits that should be packed.</p></li>
<li><p><strong>pack_axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Axis that should be decomposed and packed.</p></li>
<li><p><strong>bit_axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – New axis containing bitplane.</p></li>
<li><p><strong>pack_type</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a>) – Datatype to pack bits into.</p></li>
<li><p><strong>name</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Name of the operation.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The packed tensor.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.bitserial_conv2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">bitserial_conv2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(3,</span> <span class="pre">3)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">activation_bits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight_bits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span 
class="default_value"><span class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pack_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'uint32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'int16'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">unipolar</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.bitserial_conv2d" title="永久链接至目标">¶</a></dt>
<dd><p>使用位序列计算的 2D 卷积。</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>activation_bits</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Number of bits to pack for activations.</p></li>
<li><p><strong>weight_bits</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Number of bits to pack for weights.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the kernel</p></li>
<li><p><strong>pack_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Datatype to pack bits into.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
<li><p><strong>unipolar</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – Whether to use unipolar or bipolar quantization for inputs.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.bitserial_dense">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">bitserial_dense</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">units</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_bits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight_bits</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pack_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'uint32'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'int16'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">unipolar</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.bitserial_dense" title="永久链接至目标">¶</a></dt>
<dd><p>Bitserial Dense operator.
Applies matrix multiplication of two quantized matrices
using a fast bitserial algorithm.</p>
<div class="math notranslate nohighlight">
\[Y = X * W\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>units</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of hidden units of the dense transformation.</p></li>
<li><p><strong>data_bits</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Number of bits incoming tensor should be packed with.</p></li>
<li><p><strong>weight_bits</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Number of bits weight tensor should be packed with.</p></li>
<li><p><strong>pack_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Datatype to pack individual bits into before computation.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision dense.</p></li>
<li><p><strong>unipolar</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – Whether to use unipolar or bipolar quantization for inputs.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.const">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">const</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">value</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.const" title="永久链接至目标">¶</a></dt>
<dd><p>Create a constant value.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>value</strong> (<em>Union</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, </em><a class="reference external" href="https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html#numpy.ndarray" title="(在 NumPy v1.21)"><em>numpy.ndarray</em></a><em>, </em><a class="reference internal" href="../ndarray.html#tvm.nd.NDArray" title="tvm.nd.NDArray"><em>tvm.nd.NDArray</em></a><em>]</em>) – The constant value.</p></li>
<li><p><strong>dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – The data type of the resulting constant.</p></li>
</ul>
</dd>
</dl>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>When dtype is None, we use the following rule:</p>
<ul class="simple">
<li><p>int maps to “int32”</p></li>
<li><p>float maps to “float32”</p></li>
<li><p>bool maps to “bool”</p></li>
<li><p>other using the same default rule as numpy.</p></li>
</ul>
</div>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv2d_gemm_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv2d_gemm_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">weights</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tile_rows</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tile_cols</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv2d_gemm_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>Weight Transformation part for 2D convolution with gemm algorithm.</p>
<p>We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_gemm_without_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>weights</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>tile_rows</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Tile rows of the weight transformation for ConvGemm.</p></li>
<li><p><strong>tile_cols</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Tile columns of the weight transformation for ConvGemm.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv2d_gemm_without_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv2d_gemm_without_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span 
class="default_value"><span class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv2d_gemm_without_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>2D convolution with gemm algorithm.</p>
<p>The basic parameters are the same as the ones in vanilla conv2d.
It assumes the weight is pre-transformed by nn.contrib_conv2d_gemm_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv2d_nchwc">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv2d_nchwc</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW8c'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span 
class="default_value"><span class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv2d_nchwc" title="永久链接至目标">¶</a></dt>
<dd><p>Variant of 2D convolution.</p>
<p>This operator takes the weight as the convolution kernel
and convolves it with data to produce an output, following a specialized
NCHWc data layout.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>kernel</strong> (<em>tvm.relay.Expr</em>) – The kernel expressions.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv2d_winograd_nnpack_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv2d_winograd_nnpack_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">convolution_algorithm</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv2d_winograd_nnpack_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>基于 Winograd 算法的 2D 卷积的权重变换（Weight Transformation）部分。</p>
<p>We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_winograd_without_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>convolution_algorithm</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The NNPACK convolution algorithm to use for the Winograd weight transformation.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type of the transformed weight.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv2d_winograd_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv2d_winograd_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tile_size</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv2d_winograd_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>基于 Winograd 算法的 2D 卷积的权重变换（Weight Transformation）部分。</p>
<p>We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_winograd_without_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>tile_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv2d_winograd_without_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv2d_winograd_without_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tile_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv2d_winograd_without_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>基于 Winograd 算法的 2D 卷积。</p>
<p>The basic parameters are the same as the ones in vanilla conv2d.
It assumes the weight is pre-transformed by nn.contrib_conv2d_winograd_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>tile_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv3d_winograd_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv3d_winograd_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tile_size</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv3d_winograd_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>基于 Winograd 算法对 3D 卷积的转换部分进行加权。</p>
<p>We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv3d_winograd_without_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>tile_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The tile size of Winograd. E.g. 2 for F(2x2x2, 3x3x3) and 4 for F(4x4x4, 3x3x3)</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_conv3d_winograd_without_weight_transform">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_conv3d_winograd_without_weight_transform</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tile_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span 
class="pre">'NCDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_conv3d_winograd_without_weight_transform" title="永久链接至目标">¶</a></dt>
<dd><p>基于 Winograd 算法的 3D 卷积。</p>
<p>The basic parameters are the same as the ones in vanilla conv3d.
It assumes the weight is pre-transformed by nn.contrib_conv3d_winograd_weight_transform</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>tile_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The tile size of Winograd. E.g. 2 for F(2x2x2, 3x3x3) and 4 for F(4x4x4, 3x3x3)</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv3d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_dense_pack">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_dense_pack</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NC'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">units</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_dense_pack" title="永久链接至目标">¶</a></dt>
<dd><p>Dense operator.
Applies a linear transformation with packed weight</p>
<div class="math notranslate nohighlight">
\[Y = X * W^T\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator,
of shape <cite>(batch, units_in)</cite>.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The transformed weight expressions, 3-D matrix,
of shape <cite>(units // pack_weight_tile, units_in, pack_weight_tile)</cite>.</p></li>
<li><p><strong>weight_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a>) – The layout of weight, such as “NC” or “NC8n”.</p></li>
<li><p><strong>units</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of hidden units of the dense transformation.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision dense.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.contrib_depthwise_conv2d_nchwc">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">contrib_depthwise_conv2d_nchwc</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW8c'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span 
class="default_value"><span class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.contrib_depthwise_conv2d_nchwc" title="永久链接至目标">¶</a></dt>
<dd><p>Variant of 2D depthwise convolution.</p>
<p>This operator takes the weight as the depthwise convolution kernel
and depthwise convolves it with data to produce an output, following a specialized
NCHWc data layout.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>kernel</strong> (<em>tvm.relay.Expr</em>) – The kernel expressions.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.conv1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">conv1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIW'</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.conv1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D convolution.</p>
<p>This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
and kernel_layout is <cite>OIW</cite>, conv1d takes in
a data Tensor with shape <cite>(batch_size, in_channels, width)</cite>,
and a weight Tensor with shape <cite>(channels, in_channels, kernel_size)</cite>
to produce an output Tensor with the following rule:</p>
<div class="math notranslate nohighlight">
\[\mbox{out}[b, c, w] = \sum_{dw, k}
   \mbox{data}[b, k, \mbox{strides}[0] * w + dw] *
   \mbox{weight}[c, k, dw]\]</div>
<p>Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(<cite>NCW</cite> for data and <cite>OIW</cite> for weight), perform the computation,
then convert to the out_layout.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The padding of convolution on both sides of the input before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Currently unused for 1D convolution.</p></li>
<li><p><strong>channels</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The spatial dimension of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Specifies the output data type for mixed precision conv1d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.conv1d_transpose">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">conv1d_transpose</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIW'</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.conv1d_transpose" title="永久链接至目标">¶</a></dt>
<dd><p>One dimensional transposed convolution operator.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs.</p></li>
<li><p><strong>dilation</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>output_padding</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – Used to disambiguate the output shape.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv1d transpose.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.conv2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">conv2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span 
class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.conv2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D 卷积。</p>
<p>This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
and kernel_layout is <cite>OIHW</cite>, conv2d takes in
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
and a weight Tensor with shape <cite>(channels, in_channels, kernel_size[0], kernel_size[1])</cite>
to produce an output Tensor with the following rule:</p>
<div class="math notranslate nohighlight">
\[\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
   \mbox{data}[b, k, \mbox{strides}[0] * y  + dy, \mbox{strides}[1] * x + dx] *
   \mbox{weight}[c, k, dy, dx]\]</div>
<p>Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(<cite>NCHW</cite> for data and <cite>OIHW</cite> for weight), perform the computation,
then convert to the out_layout.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.conv2d_transpose">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">conv2d_transpose</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span 
class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.conv2d_transpose" title="永久链接至目标">¶</a></dt>
<dd><p>二维转置卷积算子。</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs.</p></li>
<li><p><strong>dilation</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>output_padding</strong> (<em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>, </em><em>optional</em>) – Used to disambiguate the output shape.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d_transpose.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.conv3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">conv3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span 
class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.conv3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D 卷积。</p>
<p>This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
and kernel_layout is <cite>OIDHW</cite>, conv3d takes in
a data Tensor with shape <cite>(batch_size, in_channels, depth, height, width)</cite>,
and a weight Tensor with shape <cite>(channels, in_channels, kernel_size[0], kernel_size[1],
kernel_size[2])</cite> to produce an output Tensor with the following rule:</p>
<div class="math notranslate nohighlight">
\[\mbox{out}[b, c, z, y, x] = \sum_{dz, dy, dx, k}
   \mbox{data}[b, k, \mbox{strides}[0] * z  + dz, \mbox{strides}[1] * y  + dy,
   \mbox{strides}[2] * x + dx] * \mbox{weight}[c, k, dz, dy, dx]\]</div>
<p>Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(<cite>NCDHW</cite> for data and <cite>OIDHW</cite> for weight), perform the computation,
then convert to the out_layout.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>Optional</em><em>[</em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Specifies the output data type for mixed precision conv3d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.conv3d_transpose">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">conv3d_transpose</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">output_padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.conv3d_transpose" title="永久链接至目标">¶</a></dt>
<dd><p>3D 转置卷积。</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>Optional</em><em>[</em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>groups</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em><em>]</em>) – The spatial dimensions of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Specifies the output data type for mixed precision conv3d_transpose.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.correlation">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">correlation</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data2</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">max_displacement</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">stride2</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">is_multiply</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.correlation" title="永久链接至目标">¶</a></dt>
<dd><p>Applies correlation to inputs.</p>
<p>The correlation layer performs multiplicative patch comparisons between two feature maps.
Given two multi-channel feature maps <span class="math notranslate nohighlight">\(f_{1}, f_{2}\)</span>, with <span class="math notranslate nohighlight">\(w\)</span>, <span class="math notranslate nohighlight">\(h\)</span>, and
<span class="math notranslate nohighlight">\(c\)</span> being their width, height, and number of channels, the correlation layer lets the
network compare each patch from <span class="math notranslate nohighlight">\(f_{1}\)</span> with each patch from <span class="math notranslate nohighlight">\(f_{2}\)</span>.</p>
<p>For now we consider only a single comparison of two patches. The ‘correlation’ of two patches
centered at <span class="math notranslate nohighlight">\(x_{1}\)</span> in the first map and <span class="math notranslate nohighlight">\(x_{2}\)</span> in the second map is then defined
as:</p>
<div class="math notranslate nohighlight">
\[c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} &lt;f_{1}(x_{1} + o), f_{2}(x_{2} + o)&gt;\]</div>
<p>for a square patch of size <span class="math notranslate nohighlight">\(K:=2k+1\)</span>.</p>
<p>Note that the equation above is identical to one step of a convolution in neural networks, but
instead of convolving data with a filter, it convolves data with other data. For this
reason, it has no training weights.</p>
<p>Computing <span class="math notranslate nohighlight">\(c(x_{1}, x_{2})\)</span> involves <span class="math notranslate nohighlight">\(c * K^{2}\)</span> multiplications. Comparing all
patch combinations involves <span class="math notranslate nohighlight">\(w^{2}*h^{2}\)</span> such computations.</p>
<p>Given a maximum displacement <span class="math notranslate nohighlight">\(d\)</span>, for each location <span class="math notranslate nohighlight">\(x_{1}\)</span> it computes
correlations <span class="math notranslate nohighlight">\(c(x_{1}, x_{2})\)</span> only in a neighborhood of size <span class="math notranslate nohighlight">\(D:=2d+1\)</span>,
by limiting the range of <span class="math notranslate nohighlight">\(x_{2}\)</span>. We use strides <span class="math notranslate nohighlight">\(s_{1}, s_{2}\)</span>, to quantize
<span class="math notranslate nohighlight">\(x_{1}\)</span> globally and to quantize <span class="math notranslate nohighlight">\(x_{2}\)</span> within the neighborhood
centered around <span class="math notranslate nohighlight">\(x_{1}\)</span>.</p>
<p>The final output is defined by the following expression:</p>
<div class="math notranslate nohighlight">
\[out[n, q, i, j] = c(x_{i, j}, x_{q})\]</div>
<p>where <span class="math notranslate nohighlight">\(i\)</span> and <span class="math notranslate nohighlight">\(j\)</span> enumerate spatial locations in <span class="math notranslate nohighlight">\(f_{1}\)</span>, and <span class="math notranslate nohighlight">\(q\)</span>
denotes the <span class="math notranslate nohighlight">\(q^{th}\)</span> neighborhood of <span class="math notranslate nohighlight">\(x_{i,j}\)</span>.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data1</strong> (<a class="reference internal" href="../te.html#tvm.te.Tensor" title="tvm.te.Tensor"><em>tvm.te.Tensor</em></a>) – 4-D with shape [batch, channel, height, width]</p></li>
<li><p><strong>data2</strong> (<a class="reference internal" href="../te.html#tvm.te.Tensor" title="tvm.te.Tensor"><em>tvm.te.Tensor</em></a>) – 4-D with shape [batch, channel, height, width]</p></li>
<li><p><strong>kernel_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Kernel size for correlation, must be an odd number</p></li>
<li><p><strong>max_displacement</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Max displacement of Correlation</p></li>
<li><p><strong>stride1</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Stride for data1</p></li>
<li><p><strong>stride2</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Stride for data2 within the neighborhood centered around data1</p></li>
<li><p><strong>padding</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>a list/tuple of 2</em><em> or </em><em>4 ints</em>) – Padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints</p></li>
<li><p><strong>is_multiply</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a>) – operation type is either multiplication or subtraction</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a>) – layout of data1, data2 and the output</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>Output</strong> – 4-D with shape [batch, out_channel, out_height, out_width]</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p><a class="reference internal" href="../te.html#tvm.te.Tensor" title="tvm.te.Tensor">tvm.te.Tensor</a></p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.cross_entropy">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">cross_entropy</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.cross_entropy" title="永久链接至目标">¶</a></dt>
<dd><p>CrossEntropy without logits.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>predictions</strong> (<em>tvm.relay.Expr</em>) – The predictions.</p></li>
<li><p><strong>targets</strong> (<em>tvm.relay.Expr</em>) – The targets.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.cross_entropy_with_logits">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">cross_entropy_with_logits</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.cross_entropy_with_logits" title="永久链接至目标">¶</a></dt>
<dd><p>CrossEntropy with logits.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>predictions</strong> (<em>tvm.relay.Expr</em>) – The predictions.</p></li>
<li><p><strong>targets</strong> (<em>tvm.relay.Expr</em>) – The targets.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.deformable_conv2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">deformable_conv2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">offset</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">deformable_groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">groups</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">channels</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span 
class="pre">data_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">kernel_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'OIHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.deformable_conv2d" title="永久链接至目标">¶</a></dt>
<dd><p>Deformable 2d convolution.</p>
<p>The deformable convolution operation is described in <a class="reference external" href="https://arxiv.org/abs/1703.06211">https://arxiv.org/abs/1703.06211</a></p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>offset</strong> (<em>tvm.relay.Expr</em>) – The offset expressions.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of convolution.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding of convolution on both sides of inputs before convolution.</p></li>
<li><p><strong>dilation</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – Specifies the dilation rate to be used for dilated convolution.</p></li>
<li><p><strong>deformable_groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of deformable groups.</p></li>
<li><p><strong>groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of groups for grouped convolution.</p></li>
<li><p><strong>channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of output channels of this convolution.</p></li>
<li><p><strong>kernel_size</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The spatial of the convolution kernel.</p></li>
<li><p><strong>data_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>kernel_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the weight.</p></li>
<li><p><strong>out_layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the output, by default, out_layout is the same as data_layout</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision conv2d.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.dense">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">dense</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weight</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">units</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.dense" title="永久链接至目标">¶</a></dt>
<dd><p>Dense operator.
Applies a linear transformation</p>
<div class="math notranslate nohighlight">
\[Y = X * W^T\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator,
of shape <cite>(d_1, d_2, …, d_n, units_in)</cite>.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The weight expressions, 2-D matrix,
of shape <cite>(units, units_in)</cite>.</p></li>
<li><p><strong>units</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Number of hidden units of the dense transformation.</p></li>
<li><p><strong>out_dtype</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Specifies the output data type for mixed precision dense,
of shape <cite>(d_1, d_2, …, d_n, units)</cite>.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.depth_to_space">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">depth_to_space</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">block_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'DCR'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.depth_to_space" title="永久链接至目标">¶</a></dt>
<dd><p>Convert channels into spatial blocks.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – Input data with channels divisible by block_size**2</p></li>
<li><p><strong>block_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Size of blocks to convert channels into.</p></li>
<li><p><strong>layout</strong> (<em>string</em>) – One of NCHW or NHWC, indicates channel axis.</p></li>
<li><p><strong>mode</strong> (<em>string</em>) – One of DCR or CDR, indicates which order channels
are accessed in.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><p><strong>result</strong> –</p>
<dl class="simple">
<dt>Tensor with shape [in_batch, in_channel / (block_size * block_size),</dt><dd><p>in_height * block_size, in_width * block_size]</p>
</dd>
</dl>
</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.dilate">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">dilate</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation_value</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.0</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.dilate" title="永久链接至目标">¶</a></dt>
<dd><p>Dilate data with given dilation value (0 by default).</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – n-D, can be any layout.</p></li>
<li><p><strong>strides</strong> (<em>tuple of &lt;int&gt;</em>) – Dilation stride on each dimension, 1 means no dilation.</p></li>
<li><p><strong>dilation_value</strong> (<em>int/float</em><em>, </em><em>optional</em>) – Value used to dilate the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>Output</strong> – The computed result</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.dropout">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">dropout</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rate</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.5</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.dropout" title="永久链接至目标">¶</a></dt>
<dd><p>对输入数列使用 dropout 算子。</p>
<p>During training, each element of the input is set to zero with
probability <code class="docutils literal notranslate"><span class="pre">p</span></code>. The whole array is rescaled by <code class="docutils literal notranslate"><span class="pre">1/(1-p)</span></code>
to keep the expected sum of the input unchanged.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>rate</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, </em><em>optional</em><em> (</em><em>default=0.5</em><em>)</em>) – The probability for an element to be reset to 0.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The result of dropout</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.dropout_raw">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">dropout_raw</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">rate</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.5</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.dropout_raw" title="永久链接至目标">¶</a></dt>
<dd><p>对输入数列使用 dropout 算子。</p>
<p>During training, each element of the input is set to zero with
probability <code class="docutils literal notranslate"><span class="pre">p</span></code>. The whole array is rescaled by <code class="docutils literal notranslate"><span class="pre">1/(1-p)</span></code>
to keep the expected sum of the input unchanged.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>rate</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, </em><em>optional</em><em> (</em><em>default=0.5</em><em>)</em>) – The probability for an element to be reset to 0.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The result of dropout</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.fast_softmax">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">fast_softmax</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.fast_softmax" title="永久链接至目标">¶</a></dt>
<dd><p>Computes softmax.
Use approximation to compute exponent for faster speed.</p>
<div class="math notranslate nohighlight">
\[\text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}\]</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – The axis to sum over when computing softmax</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.fifo_buffer">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">fifo_buffer</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">buffer</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.fifo_buffer" title="永久链接至目标">¶</a></dt>
<dd><p>FIFO buffer to enable computation reuse in CNNs with sliding window input</p>
<p>Compute equivalent of</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">concat</span><span class="p">(</span><span class="nb">buffer</span><span class="p">,</span> <span class="n">data</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">)</span>
<span class="o">.</span><span class="n">slice_axis</span><span class="p">(</span><span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">,</span>
            <span class="n">begin</span><span class="o">=</span><span class="n">data</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="n">axis</span><span class="p">],</span>
            <span class="n">end</span><span class="o">=</span><span class="n">data</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span><span class="o">+</span><span class="nb">buffer</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="n">axis</span><span class="p">])</span>
</pre></div>
</div>
<p>Useful for</p>
<ul class="simple">
<li><p>Encoding explicit re-use of computation in convolution ops operated on a sliding window input</p></li>
<li><p>Implementing a FIFO queue to cache intermediate results, e.g. as in Fast WaveNet.</p></li>
</ul>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data</p></li>
<li><p><strong>buffer</strong> (<em>tvm.relay.Expr</em>) – Previous value of the FIFO buffer</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Specify which axis should be used for buffering</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – Updated value for the buffer</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.get_pad_tuple1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">get_pad_tuple1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">padding</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.get_pad_tuple1d" title="永久链接至目标">¶</a></dt>
<dd><p>Common code to get the 1 dimensional pad option
:param padding: Padding size
:type padding: Union[int, Tuple[int, …]]</p>
<dl class="field-list simple">
<dt class="field-odd">返回</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p><strong>pad_left</strong> (<em>int</em>) – Padding size on left</p></li>
<li><p><strong>pad_right</strong> (<em>int</em>) – Padding size on right.</p></li>
</ul>
</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.get_pad_tuple2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">get_pad_tuple2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">padding</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.get_pad_tuple2d" title="永久链接至目标">¶</a></dt>
<dd><p>Common code to get the pad option
:param padding: Padding size
:type padding: Union[int, Tuple[int, …]]</p>
<dl class="field-list simple">
<dt class="field-odd">返回</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p><strong>pad_top</strong> (<em>int</em>) – Padding size on top</p></li>
<li><p><strong>pad_left</strong> (<em>int</em>) – Padding size on left</p></li>
<li><p><strong>pad_down</strong> (<em>int</em>) – Padding size on down.</p></li>
<li><p><strong>pad_right</strong> (<em>int</em>) – Padding size on right.</p></li>
</ul>
</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.get_pad_tuple3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">get_pad_tuple3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">padding</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.get_pad_tuple3d" title="永久链接至目标">¶</a></dt>
<dd><p>Common code to get the pad option
:param padding: Padding size
:type padding: Union[int, Tuple[int, …]]</p>
<dl class="field-list simple">
<dt class="field-odd">返回</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p><strong>pad_front</strong> (<em>int</em>) – Padding size on front</p></li>
<li><p><strong>pad_top</strong> (<em>int</em>) – Padding size on top</p></li>
<li><p><strong>pad_left</strong> (<em>int</em>) – Padding size on left</p></li>
<li><p><strong>pad_back</strong> (<em>int</em>) – Padding size on back</p></li>
<li><p><strong>pad_down</strong> (<em>int</em>) – Padding size on down.</p></li>
<li><p><strong>pad_right</strong> (<em>int</em>) – Padding size on right.</p></li>
</ul>
</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.global_avg_pool1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">global_avg_pool1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.global_avg_pool1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D global average pooling operator.</p>
<p>This operator takes data as input and does 1D average value calculation
across each window represented by W.</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, w)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, 1)  = \frac{1}{w} \sum_{n=0}^{w-1} \mbox{data}(b, c, n)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.global_avg_pool2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">global_avg_pool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.global_avg_pool2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D 全局平均池化算子。</p>
<p>This operator takes data as input and does 2D average value calculation
across each window represented by WxH.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, h, w)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, 1, 1)  = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
     \mbox{data}(b, c, m, n)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.global_avg_pool3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">global_avg_pool3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.global_avg_pool3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D global average pooling operator.</p>
<p>This operator takes data as input and does 3D average value calculation
across each window represented by DxWxH.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, depth, height, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, d, h, w)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, 1, 1, 1)  = \frac{1}{d * h * w} \sum_{l=0}^{d-1}  \sum_{m=0}^{h-1}
     \sum_{n=0}^{w-1} \mbox{data}(b, c, l, m, n)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.global_max_pool1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">global_max_pool1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.global_max_pool1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D global maximum pooling operator.</p>
<p>This operator takes data as input and does 1D max value calculation
across each window represented by W.</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, w)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, 1)  = \max_{n=0, \ldots, w} \mbox{data}(b, c, n)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.global_max_pool2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">global_max_pool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.global_max_pool2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D 全局最大池化算子。</p>
<p>This operator takes data as input and does 2D max value calculation
across each window represented by WxH.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, h, w)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, 1, 1)  = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
     \mbox{data}(b, c, m, n)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.global_max_pool3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">global_max_pool3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.global_max_pool3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D global maximum pooling operator.</p>
<p>This operator takes data as input and does 3D max value calculation
across each window represented by DxWxH.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, depth, height, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, d, h, w)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, 1, 1, 1)  = \max_{l=0, \ldots, d} \max_{m=0, \ldots, h}
     \max_{n=0, \ldots, w} \mbox{data}(b, c, l, m, n)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.group_norm">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">group_norm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gamma</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">beta</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">num_groups</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">epsilon</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">center</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.group_norm" title="永久链接至目标">¶</a></dt>
<dd><p>Group normalization normalizes over group of channels for each training examples.
We can say that, Group Norm is in between Instance Norm and Layer Norm. When we put
all the channels into a single group, group normalization becomes Layer normalization.
And, when we put each channel into different groups it becomes Instance normalization</p>
<p><a class="reference external" href="https://arxiv.org/pdf/1803.08494.pdf">https://arxiv.org/pdf/1803.08494.pdf</a></p>
<p>Applies group normalization to the n-dimensional input array by separating the input channels
into ‘num_groups’ groups, each containing ‘num_channels / num_groups’ channels.
The mean and standard-deviation are calculated separately over each group. gamma and
beta are learnable per-channel affine transform parameter vectors of size num_channels.</p>
<div class="math notranslate nohighlight">
\[out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis)+\epsilon}}
    * gamma + beta\]</div>
<p>Unlike batch normalization, the mean and var are computed along a group of channels.</p>
<p>If the input has size k on axis 1, then both gamma and beta have shape (k,).</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – Input to which group_norm will be applied.</p></li>
<li><p><strong>gamma</strong> (<em>tvm.relay.Expr</em>) – The gamma scale factor.</p></li>
<li><p><strong>beta</strong> (<em>tvm.relay.Expr</em>) – The beta offset factor.</p></li>
<li><p><strong>num_groups</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The number of groups to separate the channels into.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em><em>, </em><em>default=1</em>) – The axis of the channels.</p></li>
<li><p><strong>epsilon</strong> (<em>double</em><em>, </em><em>optional</em><em>, </em><em>default=1e-5</em>) – Small float added to variance to avoid dividing by zero.</p></li>
<li><p><strong>center</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, add offset of beta to normalized tensor, If False,
beta is ignored.</p></li>
<li><p><strong>scale</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, multiply by gamma. If False, gamma is not used.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The normalized data.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.instance_norm">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">instance_norm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gamma</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">beta</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">epsilon</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">center</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.instance_norm" title="永久链接至目标">¶</a></dt>
<dd><p>Instance Normalization (Ulyanov and et al., 2016)
Applies instance normalization to the n-dimensional input array.</p>
<div class="math notranslate nohighlight">
\[out = \frac{data - mean(data)}{\sqrt{var(data)+\epsilon}}
    * gamma + beta\]</div>
<p>The instance normalization is similar to batch normalization, but unlike
batch normalization, the mean and var are calculated per-dimension
separately for each object(instance) in a mini-batch, not over a batch.
And the same normalization is applied both at test and train time.</p>
<p>Assume the input has size <em>k</em> on axis 1, then both <code class="docutils literal notranslate"><span class="pre">gamma</span></code> and <code class="docutils literal notranslate"><span class="pre">beta</span></code>
have shape <em>(k,)</em>.</p>
<p>The parameter <code class="docutils literal notranslate"><span class="pre">axis</span></code> specifies which axis of the input shape denotes
the ‘channel’.  The default is 1. Specifying -1 sets the channel axis
to be the last item in the input shape.</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – Input to which instance_norm will be applied.</p></li>
<li><p><strong>gamma</strong> (<em>tvm.relay.Expr</em>) – The gamma scale factor.</p></li>
<li><p><strong>beta</strong> (<em>tvm.relay.Expr</em>) – The beta offset factor.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em><em>, </em><em>default=1</em>) – Specify along which shape axis the channel is specified.</p></li>
<li><p><strong>epsilon</strong> (<em>double</em><em>, </em><em>optional</em><em>, </em><em>default=1e-5</em>) – Small float added to variance to avoid dividing by zero.</p></li>
<li><p><strong>center</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, add offset of beta to normalized tensor, If False,
beta is ignored.</p></li>
<li><p><strong>scale</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, multiply by gamma. If False, gamma is not used.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><ul class="simple">
<li><p><strong>result</strong> (<em>tvm.relay.Expr</em>) – The normalized data.</p></li>
<li><p><em>Instance Normalization: The Missing Ingredient for Fast Stylization</em> – <a class="reference external" href="https://arxiv.org/abs/1607.08022">https://arxiv.org/abs/1607.08022</a></p></li>
</ul>
</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.l2_normalize">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">l2_normalize</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">eps</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.l2_normalize" title="永久链接至目标">¶</a></dt>
<dd><p>对输入数据执行L2标准化</p>
<div class="math notranslate nohighlight">
\[y(i, j) = \frac{x(i, j)}{\sqrt{\max\left(\sum x(i, j)^2, eps\right)}}\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>eps</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a>) – epsilon value</p></li>
<li><p><strong>axis</strong> (<em>list of int</em><em>, </em><em>optional</em>) – axis over the normalization applied</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.layer_norm">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">layer_norm</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">gamma</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">beta</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">epsilon</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">center</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">True</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.layer_norm" title="永久链接至目标">¶</a></dt>
<dd><p>Layer normalization (Lei Ba and et al., 2016).
Applies layer normalization to the n-dimensional input array.
This operator takes an n-dimensional input array and normalizes
the input using the given axis:</p>
<div class="math notranslate nohighlight">
\[out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis)+\epsilon}}
    * gamma + beta\]</div>
<p>Unlike batch normalization, the mean and var are computed along the channel dimension.</p>
<p>Assume the input has size k on axis 1, then both gamma and beta have shape (k,).</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – Input to which layer_norm will be applied.</p></li>
<li><p><strong>gamma</strong> (<em>tvm.relay.Expr</em>) – The gamma scale factor.</p></li>
<li><p><strong>beta</strong> (<em>tvm.relay.Expr</em>) – The beta offset factor.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em><em>, </em><em>default=-1</em>) – The axis that should be normalized, typically the axis of the channels.</p></li>
<li><p><strong>epsilon</strong> (<em>double</em><em>, </em><em>optional</em><em>, </em><em>default=1e-5</em>) – Small float added to variance to avoid dividing by zero.</p></li>
<li><p><strong>center</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, add offset of beta to normalized tensor, If False,
beta is ignored.</p></li>
<li><p><strong>scale</strong> (<em>boolean</em><em>, </em><em>optional</em><em>, </em><em>default=True</em>) – If True, multiply by gamma. If False, gamma is not used.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The normalized data.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.leaky_relu">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">leaky_relu</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.01</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.leaky_relu" title="永久链接至目标">¶</a></dt>
<dd><p>该算子将数据作为输入，并代入带泄露的修正线性单元。</p>
<div class="math notranslate nohighlight">
\[y = x &gt; 0 ? x : alpha * x\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>alpha</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a>) – Slope coefficient for the negative half axis.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.log_softmax">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">log_softmax</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.log_softmax" title="永久链接至目标">¶</a></dt>
<dd><p>计算 log_softmax。</p>
<div class="math notranslate nohighlight">
\[\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}\]</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – The axis to sum over when computing log softmax</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.lrn">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">lrn</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">5</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">bias</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">2</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1e-05</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">beta</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0.75</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.lrn" title="永久链接至目标">¶</a></dt>
<dd><p>This operator takes data as input and does local response normalization.</p>
<p>Normalize the input in a local region across or within feature maps.
Each input value is divided by (data / (bias + (alpha * sum_data ^2 /size))^beta)
where n is the size of each local region, and the sum is taken over the region
centered at that value (zero padding is added where necessary).</p>
<div class="math notranslate nohighlight">
\[(data / (bias + (alpha * sum_data ^2 /size))^beta)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – The size of the local region to be considered for normalization.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Input data layout channel axis. Default value is 1 for NCHW format</p></li>
<li><p><strong>bias</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, </em><em>optional</em>) – The offset parameter to avoid dividing by 0.</p></li>
<li><p><strong>alpha</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, </em><em>optional</em>) – The scaling parameter.</p></li>
<li><p><strong>beta</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, </em><em>optional</em>) – The exponent parameter.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.matmul">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">matmul</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">tensor_a</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">tensor_b</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">units</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">None</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">out_dtype</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">''</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">transpose_a</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">transpose_b</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.matmul" title="永久链接至目标">¶</a></dt>
<dd><p>Matmul operator.
Applies a linear transformation. The A &amp; B can be transposed.</p>
<div class="math notranslate nohighlight">
\[C = A * B\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The first input of the operator,
of shape <cite>(d_1, d_2, …, d_n, units_in)</cite> or <cite>(d_1, d_2, …, units_in, d_n)</cite>.</p></li>
<li><p><strong>weight</strong> (<em>tvm.relay.Expr</em>) – The second input expressions, 2-D matrix,
of shape <cite>(units_in, units)</cite> or <cite>(units, units_in)</cite>.</p></li>
<li><p><strong>units</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>]</em>) – Number of hidden units of the matmul transformation.</p></li>
<li><p><strong>out_dtype</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>]</em>) – Specifies the output data type for mixed precision matmul,
of shape <cite>(d_1, d_2, …, d_n, units)</cite>.</p></li>
<li><p><strong>transpose_a</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>] </em><em>= False</em>) – Whether the data tensor is in transposed format.</p></li>
<li><p><strong>transpose_b</strong> (<em>Optional</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>] </em><em>= False</em>) – Whether the weight tensor is in transposed format.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.max_pool1d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">max_pool1d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.max_pool1d" title="永久链接至目标">¶</a></dt>
<dd><p>1D maximum pooling operator.</p>
<p>This operator takes data as input and does 1D max value calculation
within a pool_size sized window, striding as defined by stride.</p>
<p>In the default case, where the data_layout is <cite>NCW</cite>
a data Tensor with shape <cite>(batch_size, channels, width)</cite>,
to produce an output Tensor.</p>
<p>The ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>dilation</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The dilation of pooling.</p></li>
<li><p><strong>padding</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.max_pool2d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">max_pool2d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.max_pool2d" title="永久链接至目标">¶</a></dt>
<dd><p>2D 最大池化算子。</p>
<p>This operator takes data as input and does 2D max value calculation
within a pool_size sized window, striding as defined by stride.</p>
<p>In the default case, where the data_layout is <cite>NCHW</cite>
a data Tensor with shape <cite>(batch_size, in_channels, height, width)</cite>,
to produce an output Tensor with the following rule:</p>
<p>with data of shape (b, c, h, w) and pool_size (kh, kw)</p>
<div class="math notranslate nohighlight">
\[\mbox{out}(b, c, y, x)  = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
     \mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)\]</div>
<p>Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
This operator accepts data layout specification.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>dilation</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The dilation of pooling.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.max_pool2d_grad">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">max_pool2d_grad</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">out_grad</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.max_pool2d_grad" title="永久链接至目标">¶</a></dt>
<dd><p>Gradient of 2D maximum pooling operator.</p>
<p>This operator takes out_grad and data as input and calculates gradient of max_pool2d.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>out_grad</strong> (<em>tvm.relay.Expr</em>) – The output gradient</p></li>
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.max_pool3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">max_pool3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pool_size</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">strides</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">dilation</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(1,</span> <span class="pre">1,</span> <span class="pre">1)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">padding</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">(0,</span> <span class="pre">0,</span> <span class="pre">0)</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ceil_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.max_pool3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D 最大池化算子。</p>
<p>This operator takes data as input and does 3D max value calculation
within a pool_size sized window, striding as defined by stride.</p>
<p>In the default case, where the data_layout is <cite>NCDHW</cite>
a data Tensor with shape <cite>(batch_size, channels, depth, height, width)</cite>,
to produce an output Tensor.</p>
<p>The ceil_mode is used to take ceil or floor while computing out shape.
This operator accepts data layout specification.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>pool_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The size of window for pooling.</p></li>
<li><p><strong>strides</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The strides of pooling.</p></li>
<li><p><strong>dilation</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><em>tuple of int</em><em>, </em><em>optional</em>) – The dilation of pooling.</p></li>
<li><p><strong>padding</strong> (<em>tuple of int</em><em>, </em><em>optional</em>) – The padding for pooling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>ceil_mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – To enable or disable ceil while pooling.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.mirror_pad">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">mirror_pad</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_width</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'SYMMETRIC'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.mirror_pad" title="永久链接至目标">¶</a></dt>
<dd><p>MirrorPadding</p>
<p>This operator takes in a tensor and pads each axis by the specified
widths using mirroring of the border pixels.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator</p></li>
<li><p><strong>pad_width</strong> (<em>tuple of &lt;tuple of &lt;int&gt;&gt;</em><em>, </em><em>required</em>) – Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), …, (before_N, after_N))</p></li>
<li><p><strong>mode</strong> (<em>string</em><em>, </em><em>optional</em><em>, </em><em>default='SYMMETRIC'</em>) – What type of mirroring to use, must be SYMMETRIC or REFLECT.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.nll_loss">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">nll_loss</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">predictions</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">targets</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">weights</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">reduction</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'mean'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">ignore_index</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">100</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.nll_loss" title="永久链接至目标">¶</a></dt>
<dd><p>Negative log likelihood loss.</p>
<dl class="simple">
<dt>output{n, i_1, i_2, …, i_k} = -p * w</dt><dd><dl class="simple">
<dt>where t = target{n, i_1, i_2, …, i_k}</dt><dd><p>p = predictions{n, t, i_1, i_2, …, i_k}
w = weights{n, i_1, i_2, …, i_k} if t != ignore_index else 0</p>
</dd>
</dl>
</dd>
</dl>
<p>result = reduction(output)</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>predictions</strong> (<em>tvm.relay.Expr</em>) – The predictions.</p></li>
<li><p><strong>targets</strong> (<em>tvm.relay.Expr</em>) – The target value of each prediction.</p></li>
<li><p><strong>weights</strong> (<em>tvm.relay.Expr</em>) – The weight of each target value.</p></li>
<li><p><strong>reduction</strong> (<em>string</em>) – The reduction method to apply to the output.
Possible values are “mean”, “sum” and “none”.</p></li>
<li><p><strong>ignore_index</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – The target value to ignore.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.pad">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">pad</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_width</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_value</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'constant'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.pad" title="永久链接至目标">¶</a></dt>
<dd><p>填充</p>
<p>This operator takes in a tensor and pads each axis by the specified
widths using the specified value.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator</p></li>
<li><p><strong>pad_width</strong> (<em>tuple of &lt;tuple of &lt;int&gt;&gt;</em><em>, or </em><em>tvm.relay.Expr</em><em>, </em><em>required</em>) – Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), …, (before_N, after_N))</p></li>
<li><p><strong>pad_value</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, or </em><em>tvm.relay.Expr</em><em>, </em><em>optional</em><em>, </em><em>default=0</em>) – The value used for padding</p></li>
<li><p><strong>pad_mode</strong> (<em>'constant'</em><em>, </em><em>'edge'</em><em>, </em><em>'reflect'</em>) – ‘constant’ pads with constant_value pad_value
‘edge’ pads using the edge values of the input array
‘reflect’ pads by reflecting values with respect to the edge</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.prelu">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">prelu</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">alpha</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.prelu" title="永久链接至目标">¶</a></dt>
<dd><p>该算子将数据作为输入，并代入带泄露的修正线性单元。</p>
<div class="math notranslate nohighlight">
\[y = x &gt; 0 ? x : alpha * x\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>alpha</strong> (<em>tvm.relay.Expr</em>) – Slope coefficient for the negative half axis.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – Specify which shape axis the channel is specified.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.relu">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">relu</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.relu" title="永久链接至目标">¶</a></dt>
<dd><p>修正线性单元。</p>
<div class="math notranslate nohighlight">
\[out = max(x, 0)\]</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data</p>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.softmax">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">softmax</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">axis</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">-</span> <span class="pre">1</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.softmax" title="永久链接至目标">¶</a></dt>
<dd><p>计算 softmax。</p>
<div class="math notranslate nohighlight">
\[\text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}\]</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>This operator can be optimized away for inference.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>axis</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em>, </em><em>optional</em>) – The axis to sum over when computing softmax</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.space_to_batch_nd">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">space_to_batch_nd</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">block_shape</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">paddings</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">pad_value</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">0</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.space_to_batch_nd" title="永久链接至目标">¶</a></dt>
<dd><p>Divide spatial dimensions of the data into a grid of blocks
and interleave them into batch dim.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<a class="reference internal" href="../te.html#tvm.te.Tensor" title="tvm.te.Tensor"><em>tvm.te.Tensor</em></a>) – N-D with shape [batch, spatial_shape, remaining_shape]</p></li>
<li><p><strong>block_shape</strong> (<em>relay.Expr</em>) – 1-D of size [M] where M is number of spatial dims, specifies block size
for each spatial dimension.</p></li>
<li><p><strong>paddings</strong> (<em>relay.Expr</em>) – 2-D of shape [M, 2] where M is number of spatial dims, specifies
[before, after] paddings for each spatial dimension.</p></li>
<li><p><strong>pad_value</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a><em>, or </em><em>relay.Expr</em><em>, </em><em>optional</em><em>, </em><em>default=0</em>) – The value used for padding.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – N-D Tensor with shape
[in_batch * prod(block_shape),
padded_data[1] / block_shape[0], …, padded_data[M] / block_shape[M-1],
remaining_shape]</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.space_to_depth">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">space_to_depth</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">block_size</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.space_to_depth" title="永久链接至目标">¶</a></dt>
<dd><p>Convert spatial blocks into channels.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – Input data with spatial dimensions divisible by block_size</p></li>
<li><p><strong>block_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a>) – Size of blocks to decompose into channels.</p></li>
<li><p><strong>layout</strong> (<em>string</em>) – One of NCHW or NHWC, indicates channel axis.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><p><strong>result</strong> –</p>
<dl class="simple">
<dt>Tensor with shape [in_batch, in_channel * block_size * block_size,</dt><dd><p>in_height / block_size, in_width / block_size]</p>
</dd>
</dl>
</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.sparse_add">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">sparse_add</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">dense_mat</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">sparse_mat</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.sparse_add" title="永久链接至目标">¶</a></dt>
<dd><p>Computes the matrix addition of <cite>dense_mat</cite> and <cite>sparse_mat</cite>, where <cite>dense_mat</cite> is
a dense matrix and <cite>sparse_mat</cite> is a sparse (CSR) namedtuple with
fields <cite>data</cite>, <cite>indices</cite>, and <cite>indptr</cite>.</p>
<div class="math notranslate nohighlight">
\[\mbox{sparse_add}(dense_mat, sparse_mat)[m, n] = \mbox{add}(\mbox{as_dense}(S), (D))[m, n]\]</div>
<p>where <cite>as_dense</cite> returns dense equivalent of the given S(sparse matrix)
while performing addition with given D(dense matrix).</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>dense_mat</strong> (<em>tvm.relay.Expr</em>) – The input dense matrix for the matrix addition</p></li>
<li><p><strong>sparse_mat</strong> (<em>Union</em><em>[</em><em>namedtuple</em><em>, </em><em>Tuple</em><em>[</em><em>ndarray</em><em>, </em><em>ndarray</em><em>, </em><em>ndarray</em><em>]</em><em>]</em><em></em>) – The input sparse matrix(CSR) for the matrix addition.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
<p class="rubric">实际案例</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">dense_data</span> <span class="o">=</span> <span class="p">[[</span> <span class="mf">3.</span><span class="p">,</span>   <span class="mf">4.</span><span class="p">,</span>   <span class="mf">4.</span> <span class="p">]</span>
              <span class="p">[</span> <span class="mf">4.</span><span class="p">,</span>  <span class="mf">2.</span><span class="p">,</span>  <span class="mf">5.</span> <span class="p">]]</span>
<span class="n">sparse_data</span> <span class="o">=</span> <span class="p">[</span><span class="mf">4.</span><span class="p">,</span> <span class="mf">8.</span><span class="p">]</span>
<span class="n">sparse_indices</span> <span class="o">=</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">]</span>
<span class="n">sparse_indptr</span> <span class="o">=</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">]</span>

<span class="n">output</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">sparse_add</span><span class="p">(</span><span class="n">dense_data</span><span class="p">,</span> <span class="n">sparse_data</span><span class="p">,</span> <span class="n">sparse_indices</span><span class="p">,</span> <span class="n">sparse_indptr</span><span class="p">)</span>

<span class="n">output</span> <span class="o">=</span> <span class="p">[[</span> <span class="mf">7.</span><span class="p">,</span>   <span class="mf">4.</span><span class="p">,</span>   <span class="mf">4.</span> <span class="p">]</span>
          <span class="p">[</span> <span class="mf">4.</span><span class="p">,</span>  <span class="mf">2.</span><span class="p">,</span>  <span class="mf">13.</span> <span class="p">]]</span>
</pre></div>
</div>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.sparse_dense">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">sparse_dense</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">dense_mat</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">sparse_mat</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">sparse_lhs</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.sparse_dense" title="永久链接至目标">¶</a></dt>
<dd><p>Computes the matrix multiplication of <cite>dense_mat</cite> and <cite>sparse_mat</cite>, where <cite>dense_mat</cite> is
a dense matrix and <cite>sparse_mat</cite> is a sparse (either BSR or CSR) namedtuple with
fields <cite>data</cite>, <cite>indices</cite>, and <cite>indptr</cite>.</p>
<dl>
<dt>if sparse_lhs=False:</dt><dd><div class="math notranslate nohighlight">
\[\mbox{sparse_dense}(dense_mat, sparse_mat)[m, n]
= \mbox{matmul}(D, \mbox{as_dense}(S)^T)[m, n]\]</div>
</dd>
<dt>if sparse_lhs=True:</dt><dd><div class="math notranslate nohighlight">
\[\mbox{sparse_dense}(dense_mat, sparse_mat)[m, n]
= \mbox{matmul}(\mbox{as_dense}(S), (D)^T)[m, n]\]</div>
</dd>
</dl>
<p>where <cite>as_dense</cite> returns dense equivalent of the given S(sparse matrix)
while performing matmul with given D(dense matrix).</p>
<p>See
<a class="reference external" href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html">https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html</a>
and
<a class="reference external" href="https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.bsr_matrix.html">https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.bsr_matrix.html</a>
for more detail on the sparse matrix representation.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>dense_mat</strong> (<em>tvm.relay.Expr</em>) – The input dense matrix for the matrix multiplication</p></li>
<li><p><strong>sparse_mat</strong> (<em>Union</em><em>[</em><em>namedtuple</em><em>, </em><em>Tuple</em><em>[</em><em>ndarray</em><em>, </em><em>ndarray</em><em>, </em><em>ndarray</em><em>]</em><em>]</em><em></em>) – The input sparse matrix for the matrix multiplication.</p></li>
<li><p><strong>sparse_lhs</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – Indicates whether lhs or rhs matrix is sparse. Default value is False.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.sparse_transpose">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">sparse_transpose</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">x</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.sparse_transpose" title="永久链接至目标">¶</a></dt>
<dd><p>Computes the fast matrix transpose of x,
where x is a sparse tensor in CSR format (represented as a namedtuple
with fields <cite>data</cite>, <cite>indices</cite>, and <cite>indptr</cite>).</p>
<p>** Currently only supports square matrices **</p>
<div class="math notranslate nohighlight">
\[\mbox{sparse_transpose}(x)[n, n] = (x^T)[n, n]\]</div>
<p>Please refer to <a class="reference external" href="https://github.com/scipy/scipy/blob/v1.3.0/scipy/sparse/csr.py">https://github.com/scipy/scipy/blob/v1.3.0/scipy/sparse/csr.py</a>
for the algorithm implemented in this operator.</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><p><strong>x</strong> (<em>Union</em><em>[</em><em>namedtuple</em><em>, </em><em>Tuple</em><em>[</em><em>ndarray</em><em>, </em><em>ndarray</em><em>, </em><em>ndarray</em><em>]</em><em>]</em><em></em>) – The sparse weight matrix for the fast matrix transpose.</p>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – Tuple of output sparse tensor (same shape and format as input),
i.e. if CSR then output is in ([data, indices, indptr]) form</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>relay.Tuple([tvm.relay.Expr, tvm.relay.Expr, tvm.relay.Expr])</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.upsampling">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">upsampling</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale_h</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale_w</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">method</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'nearest_neighbor'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">align_corners</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">False</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.upsampling" title="永久链接至目标">¶</a></dt>
<dd><p>上采样。</p>
<p>This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is <cite>NCHW</cite>
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale_h, w*scale_w)</p>
<p>method indicates the algorithm to be used while calculating the out value
and method can be one of (“bilinear”, “nearest_neighbor”, “bicubic”)</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>scale_h</strong> (<em>tvm.relay.Expr</em><em> or </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a>) – The scale factor for height upsampling.</p></li>
<li><p><strong>scale_w</strong> (<em>tvm.relay.Expr</em><em> or </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.10)"><em>int</em></a><em> or </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.10)"><em>float</em></a>) – The scale factor for width upsampling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>method</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Scale method to be used [nearest_neighbor, bilinear, bicubic].</p></li>
<li><p><strong>align_corners</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(在 Python v3.10)"><em>bool</em></a><em>, </em><em>optional</em>) – Whether to keep corners in proper place.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

<dl class="py function">
<dt class="sig sig-object py" id="tvm.relay.nn.upsampling3d">
<span class="sig-prename descclassname"><span class="pre">tvm.relay.nn.</span></span><span class="sig-name descname"><span class="pre">upsampling3d</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">data</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale_d</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale_h</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">scale_w</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">1</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">layout</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'NCDHW'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">method</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'nearest_neighbor'</span></span></em>, <em class="sig-param"><span class="n"><span class="pre">coordinate_transformation_mode</span></span><span class="o"><span class="pre">=</span></span><span class="default_value"><span class="pre">'half_pixel'</span></span></em><span class="sig-paren">)</span><a class="headerlink" href="#tvm.relay.nn.upsampling3d" title="永久链接至目标">¶</a></dt>
<dd><p>3D 上采样。</p>
<p>This operator takes data as input and does 3D scaling to the given scale factor.
In the default case, where the data_layout is <cite>NCDHW</cite>
with data of shape (n, c, d, h, w)
out will have a shape (n, c, d*scale_d, h*scale_h, w*scale_w)</p>
<p>method indicates the algorithm to be used while calculating the out value
and method can be one of (“trilinear”, “nearest_neighbor”)</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>data</strong> (<em>tvm.relay.Expr</em>) – The input data to the operator.</p></li>
<li><p><strong>scale_d</strong> (<em>tvm.relay.Expr</em>) – The scale factor for depth upsampling.</p></li>
<li><p><strong>scale_h</strong> (<em>tvm.relay.Expr</em>) – The scale factor for height upsampling.</p></li>
<li><p><strong>scale_w</strong> (<em>tvm.relay.Expr</em>) – The scale factor for width upsampling.</p></li>
<li><p><strong>layout</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Layout of the input.</p></li>
<li><p><strong>method</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.10)"><em>str</em></a><em>, </em><em>optional</em>) – Scale method to be used [nearest_neighbor, trilinear].</p></li>
<li><p><strong>coordinate_transformation_mode</strong> (<em>string</em><em>, </em><em>optional</em>) – Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are “half_pixel”, “align_corners” and “asymmetric”.</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p><strong>result</strong> – The computed result.</p>
</dd>
<dt class="field-odd">返回类型</dt>
<dd class="field-odd"><p>tvm.relay.Expr</p>
</dd>
</dl>
</dd></dl>

</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="vision.html" class="btn btn-neutral float-right" title="tvm.relay.vision" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="frontend.html" class="btn btn-neutral float-left" title="tvm.relay.frontend" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../../../_static/img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    // Standard Google Analytics (analytics.js) bootstrap snippet.
    // It installs a stub `ga()` function on `window` that queues any calls
    // made before the library loads, records the snippet load time (`ga.l`),
    // and asynchronously injects the analytics.js <script> tag before the
    // first existing script element on the page.
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    // Create a tracker for this GA property and report a pageview hit.
    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>