





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN" >
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>Matrix Multiply Blocking &mdash; tvm 0.8.dev1982 文档</title>

  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <!-- theme.css and pygments.css were each linked twice in the generated output; one include of each
       suffices. type="text/css" is the default for rel="stylesheet" and is omitted. -->
  <link rel="stylesheet" href="../../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../../_static/pygments.css">
  <link rel="stylesheet" href="../../../../_static/gallery.css">
  <link rel="stylesheet" href="../../../../_static/css/tlcpack_theme.css">

  <link rel="shortcut icon" href="../../../../_static/tvm-logo-square.png">

  <!-- documentation_options.js was included twice with the same id="documentation_options";
       the duplicate is removed. type="text/javascript" is the default and is omitted. -->
  <script id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
  <script src="../../../../_static/jquery.js"></script>
  <script src="../../../../_static/underscore.js"></script>
  <script src="../../../../_static/doctools.js"></script>
  <script src="../../../../_static/translations.js"></script>
  <script src="../../../../_static/js/theme.js"></script>
  <script src="../../../../_static/js/tlcpack_theme.js"></script>

  <link rel="index" title="索引" href="../../../../genindex.html">
  <link rel="search" title="搜索" href="../../../../search.html">
  <link rel="next" title="Auto-tuning a ALU fused op on VTA" href="../autotvm/tune_alu_vta.html">
  <link rel="prev" title="2D Convolution Optimization" href="convolution_opt.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <!-- All src/href values in this header were unquoted in the generated output; quoted below -->
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <!-- ASF link list shown in the responsive (mobile) layout -->
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <!-- ASF dropdown shown in the desktop layout -->
            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          <!-- Logo links back to the docs landing page -->
          <a href="../../../../index.html">
            <img src="../../../../_static/tvm-logo-small.png" class="logo" alt="Logo">
          </a>

          <div class="version">
            0.8.dev1982
          </div>

          <div role="search">
            <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
              <!-- aria-label added: the placeholder alone is not an accessible name for the field -->
              <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
              <input type="hidden" name="check_keywords" value="yes">
              <input type="hidden" name="area" value="default">
            </form>
          </div>
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <!-- Sidebar table of contents; "current" classes mark the path to this page -->
          <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../../../install/index.html">安装 TVM</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../contribute/index.html">贡献者指南</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../../../tutorial/index.html">User Tutorial</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../how_to/index.html">How To Guides</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../../../dev/tutorial/index.html">Developer Tutorial</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../dev/how_to/how_to.html">开发者指南</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../../../arch/index.html">Design and Architecture</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
          <ul class="current">
            <li class="toctree-l1"><a class="reference internal" href="../../../microtvm/index.html">microTVM：裸机使用TVM</a></li>
            <li class="toctree-l1 current"><a class="reference internal" href="../../index.html">VTA: Versatile Tensor Accelerator</a>
              <ul class="current">
                <li class="toctree-l2"><a class="reference internal" href="../../install.html">VTA安装指南</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../../dev/index.html">VTA设计与开发指南</a></li>
                <li class="toctree-l2 current"><a class="reference internal" href="../index.html">VTA教程</a>
                  <ul class="current">
                    <li class="toctree-l3"><a class="reference internal" href="../matrix_multiply.html">Simple Matrix Multiply</a></li>
                    <li class="toctree-l3"><a class="reference internal" href="../vta_get_started.html">从 VTA 开始</a></li>
                    <li class="toctree-l3"><a class="reference internal" href="../index.html#compile-deep-learning-models">编译深度学习模型</a></li>
                    <li class="toctree-l3 current"><a class="reference internal" href="../index.html#optimize-tensor-operators">优化张量算子</a>
                      <ul class="current">
                        <li class="toctree-l4"><a class="reference internal" href="convolution_opt.html">2D Convolution Optimization</a></li>
                        <li class="toctree-l4 current"><a class="current reference internal" href="#">Matrix Multiply Blocking</a></li>
                      </ul>
                    </li>
                    <li class="toctree-l3"><a class="reference internal" href="../index.html#auto-tuning">自动调整</a></li>
                  </ul>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../../index.html#literature">Literature</a></li>
              </ul>
            </li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../../../reference/langref/index.html">语言参考</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../reference/api/python/index.html">Python API</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../reference/api/links.html">Other APIs</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../reference/publications.html">Publications</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../genindex.html">索引</a></li>
          </ul>
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <!-- Mobile top bar: hamburger toggle plus a table-of-contents label -->
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        <div class="togglemenu"></div>
        <div class="nav-content">
          <!-- tvm -->
          Table of content
        </div>
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    <li><a href="../../../../index.html">Docs</a> <span class="br-arrow">></span></li>
    <li><a href="../../index.html">VTA: Versatile Tensor Accelerator</a> <span class="br-arrow">></span></li>
    <li><a href="../index.html">VTA教程</a> <span class="br-arrow">></span></li>
    <li>Matrix Multiply Blocking</li>
    <li class="wy-breadcrumbs-aside">
      <!-- Fixed doubled slash in the icon path: _static//img/source.svg -> _static/img/source.svg -->
      <a href="../../../../_sources/topic/vta/tutorials/optimize/matrix_multiply_opt.rst.txt" rel="nofollow"> <img src="../../../../_static/img/source.svg" alt="viewsource"></a>
    </li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <!-- Sphinx-Gallery admonition linking to the downloadable example code section further down the page -->
  <div class="sphx-glr-download-link-note admonition note">
<p class="admonition-title">注解</p>
<p>点击 <a class="reference internal" href="#sphx-glr-download-topic-vta-tutorials-optimize-matrix-multiply-opt-py"><span class="std std-ref">此处</span></a> 下载全部示例代码</p>
</div>
<div class="sphx-glr-example-title section" id="matrix-multiply-blocking">
<span id="vta-mat-mult-opt"></span><span id="sphx-glr-topic-vta-tutorials-optimize-matrix-multiply-opt-py"></span><h1>Matrix Multiply Blocking<a class="headerlink" href="#matrix-multiply-blocking" title="永久链接至标题">¶</a></h1>
<p><strong>作者</strong>: <a class="reference external" href="https://homes.cs.washington.edu/~moreau/">Thierry Moreau</a></p>
<p>This tutorial provides an overview on how to use TVM to map matrix
multiplication efficiently on the VTA design.
We recommend covering the <a class="reference internal" href="../matrix_multiply.html#basic-mat-mult"><span class="std std-ref">Simple Matrix Multiply</span></a> tutorial first.</p>
<p>In this tutorial, we will demonstrate TVM schedule optimizations to break large
neural network operators down onto smaller blocks to achieve computation within
limited hardware accelerator resources.</p>
<div class="section" id="rpc-setup">
<h2>RPC设置<a class="headerlink" href="#rpc-setup" title="永久链接至标题">¶</a></h2>
<p>We start by programming the Pynq’s FPGA and building its RPC runtime.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">__future__</span> <span class="k">import</span> <span class="n">absolute_import</span><span class="p">,</span> <span class="n">print_function</span>

<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">te</span>
<span class="kn">import</span> <span class="nn">vta</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">rpc</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="k">import</span> <span class="n">utils</span>
<span class="kn">from</span> <span class="nn">vta.testing</span> <span class="k">import</span> <span class="n">simulator</span>

<span class="c1"># Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file</span>
<span class="n">env</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">get_env</span><span class="p">()</span>

<span class="c1"># We read the Pynq RPC host IP address and port number from the OS environment</span>
<span class="n">host</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;VTA_RPC_HOST&quot;</span><span class="p">,</span> <span class="s2">&quot;192.168.2.99&quot;</span><span class="p">)</span>
<span class="n">port</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;VTA_RPC_PORT&quot;</span><span class="p">,</span> <span class="s2">&quot;9091&quot;</span><span class="p">))</span>

<span class="c1"># We configure both the bitstream and the runtime system on the Pynq</span>
<span class="c1"># to match the VTA configuration specified by the vta_config.json file.</span>
<span class="k">if</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="o">==</span> <span class="s2">&quot;pynq&quot;</span><span class="p">:</span>

    <span class="c1"># Make sure that TVM was compiled with RPC=1</span>
    <span class="k">assert</span> <span class="n">tvm</span><span class="o">.</span><span class="n">runtime</span><span class="o">.</span><span class="n">enabled</span><span class="p">(</span><span class="s2">&quot;rpc&quot;</span><span class="p">)</span>
    <span class="n">remote</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">connect</span><span class="p">(</span><span class="n">host</span><span class="p">,</span> <span class="n">port</span><span class="p">)</span>

    <span class="c1"># Reconfigure the JIT runtime</span>
    <span class="n">vta</span><span class="o">.</span><span class="n">reconfig_runtime</span><span class="p">(</span><span class="n">remote</span><span class="p">)</span>

    <span class="c1"># Program the FPGA with a pre-compiled VTA bitstream.</span>
    <span class="c1"># You can program the FPGA with your own custom bitstream</span>
    <span class="c1"># by passing the path to the bitstream file instead of None.</span>
    <span class="n">vta</span><span class="o">.</span><span class="n">program_fpga</span><span class="p">(</span><span class="n">remote</span><span class="p">,</span> <span class="n">bitstream</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>

<span class="c1"># In simulation mode, host the RPC server locally.</span>
<span class="k">elif</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;sim&quot;</span><span class="p">,</span> <span class="s2">&quot;tsim&quot;</span><span class="p">]:</span>
    <span class="n">remote</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">LocalSession</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="section" id="computation-declaration">
<h2>Computation Declaration<a class="headerlink" href="#computation-declaration" title="永久链接至标题">¶</a></h2>
<p>As a first step, we need to describe our matrix multiplication computation.
We define the matrix multiplication as the computation one would find in a
fully connected layer, defined by its batch size, input channels, and output
channels.
These have to be integer multiples of the VTA tensor shape:
<code class="code docutils literal notranslate"><span class="pre">BATCH</span></code>, <code class="code docutils literal notranslate"><span class="pre">BLOCK_IN</span></code>, and <code class="code docutils literal notranslate"><span class="pre">BLOCK_OUT</span></code> respectively.</p>
<p>We’ve added extra operators to the matrix multiplication that apply
shifting and clipping to the output in order to mimic a fixed-point
matrix multiplication followed by a rectified linear activation.
We describe the TVM dataflow graph of the fully connected layer below:</p>
<img alt="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/fc_dataflow.png" class="align-center" src="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/fc_dataflow.png" />
<p>This computation is intentionally too large to fit onto VTA’s on-chip
buffers all at once. Therefore in the scheduling phase we’ll
rely on computation blocking strategies to break the computation down into
manageable chunks.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Fully connected layer dimensions: 1024 x 1024</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">1</span>
<span class="n">in_channels</span> <span class="o">=</span> <span class="mi">1024</span>
<span class="n">out_channels</span> <span class="o">=</span> <span class="mi">1024</span>
<span class="k">assert</span> <span class="n">batch_size</span> <span class="o">%</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span> <span class="o">==</span> <span class="mi">0</span>
<span class="k">assert</span> <span class="n">in_channels</span> <span class="o">%</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span> <span class="o">==</span> <span class="mi">0</span>
<span class="k">assert</span> <span class="n">out_channels</span> <span class="o">%</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span> <span class="o">==</span> <span class="mi">0</span>

<span class="c1"># Let&#39;s derive the tiled input tensor shapes</span>
<span class="n">data_shape</span> <span class="o">=</span> <span class="p">(</span><span class="n">batch_size</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">)</span>
<span class="n">weight_shape</span> <span class="o">=</span> <span class="p">(</span>
    <span class="n">out_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span>
    <span class="n">in_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">,</span>
    <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span>
    <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">,</span>
<span class="p">)</span>
<span class="n">output_shape</span> <span class="o">=</span> <span class="p">(</span><span class="n">batch_size</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">out_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">)</span>
<span class="n">num_ops</span> <span class="o">=</span> <span class="n">in_channels</span> <span class="o">*</span> <span class="n">out_channels</span> <span class="o">*</span> <span class="n">batch_size</span> <span class="o">*</span> <span class="mi">2</span>

<span class="c1"># Reduction axes</span>
<span class="n">ic</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">reduce_axis</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;ic&quot;</span><span class="p">)</span>
<span class="n">ic_tns</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">reduce_axis</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;ic_tns&quot;</span><span class="p">)</span>

<span class="c1"># Input placeholder tensors</span>
<span class="n">data</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">placeholder</span><span class="p">(</span><span class="n">data_shape</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;data&quot;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">inp_dtype</span><span class="p">)</span>
<span class="n">weight</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">placeholder</span><span class="p">(</span><span class="n">weight_shape</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;weight&quot;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">wgt_dtype</span><span class="p">)</span>

<span class="c1"># Copy buffers</span>
<span class="n">data_buf</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">data_shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">data</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="s2">&quot;data_buf&quot;</span><span class="p">)</span>
<span class="n">weight_buf</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">weight_shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">weight</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="s2">&quot;weight_buf&quot;</span><span class="p">)</span>

<span class="c1"># Declare matrix multiply computation</span>
<span class="n">res_gemm</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span>
    <span class="n">output_shape</span><span class="p">,</span>
    <span class="k">lambda</span> <span class="n">bo</span><span class="p">,</span> <span class="n">co</span><span class="p">,</span> <span class="n">bi</span><span class="p">,</span> <span class="n">ci</span><span class="p">:</span> <span class="n">te</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span>
        <span class="n">data_buf</span><span class="p">[</span><span class="n">bo</span><span class="p">,</span> <span class="n">ic</span><span class="p">,</span> <span class="n">bi</span><span class="p">,</span> <span class="n">ic_tns</span><span class="p">]</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">)</span>
        <span class="o">*</span> <span class="n">weight_buf</span><span class="p">[</span><span class="n">co</span><span class="p">,</span> <span class="n">ic</span><span class="p">,</span> <span class="n">ci</span><span class="p">,</span> <span class="n">ic_tns</span><span class="p">]</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">),</span>
        <span class="n">axis</span><span class="o">=</span><span class="p">[</span><span class="n">ic</span><span class="p">,</span> <span class="n">ic_tns</span><span class="p">],</span>
    <span class="p">),</span>
    <span class="n">name</span><span class="o">=</span><span class="s2">&quot;res_gem&quot;</span><span class="p">,</span>
<span class="p">)</span>

<span class="c1"># Add shift stage for fix-point normalization</span>
<span class="n">res_shr</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">output_shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">res_gemm</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">)</span> <span class="o">&gt;&gt;</span> <span class="n">env</span><span class="o">.</span><span class="n">INP_WIDTH</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;res_shr&quot;</span><span class="p">)</span>

<span class="c1"># Apply clipping between (0, input max value)</span>
<span class="n">inp_max</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span> <span class="o">&lt;&lt;</span> <span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">INP_WIDTH</span> <span class="o">-</span> <span class="mi">1</span><span class="p">))</span> <span class="o">-</span> <span class="mi">1</span>
<span class="n">res_max</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">output_shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">tvm</span><span class="o">.</span><span class="n">te</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="n">res_shr</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="mi">0</span><span class="p">),</span> <span class="s2">&quot;res_max&quot;</span><span class="p">)</span>
<span class="n">res_min</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">output_shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">tvm</span><span class="o">.</span><span class="n">te</span><span class="o">.</span><span class="n">min</span><span class="p">(</span><span class="n">res_max</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="n">inp_max</span><span class="p">),</span> <span class="s2">&quot;res_min&quot;</span><span class="p">)</span>

<span class="c1"># Apply typecast to input data type before sending results back</span>
<span class="n">res</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">output_shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">res_min</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">inp_dtype</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;res&quot;</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="scheduling-the-computation">
<h2>Scheduling the Computation<a class="headerlink" href="#scheduling-the-computation" title="永久链接至标题">¶</a></h2>
<p>We’ll look at a set of schedule transformations necessary to map the
matrix multiplications onto VTA in an efficient fashion.
Those include:</p>
<ul class="simple">
<li><p>Computation blocking</p></li>
<li><p>Lowering to VTA hardware intrinsics</p></li>
</ul>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Create TVM schedule</span>
<span class="n">s</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">create_schedule</span><span class="p">(</span><span class="n">res</span><span class="o">.</span><span class="n">op</span><span class="p">)</span>
<span class="c1"># Let&#39;s look at the default TVM schedule</span>
<span class="nb">print</span><span class="p">(</span><span class="n">tvm</span><span class="o">.</span><span class="n">lower</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">data</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">res</span><span class="p">],</span> <span class="n">simple_mode</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>primfn(data_1: handle, weight_1: handle, res_1: handle) -&gt; ()
  attr = {&quot;from_legacy_te_schedule&quot;: True, &quot;global_symbol&quot;: &quot;main&quot;, &quot;tir.noalias&quot;: True}
  buffers = {res: Buffer(res_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             data: Buffer(data_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             weight: Buffer(weight_2: Pointer(int8), int8, [64, 64, 16, 16], [])}
  buffer_map = {data_1: data, weight_1: weight, res_1: res} {
  allocate(data_buf: Pointer(global int8), int8, [1024]), storage_scope = global;
  allocate(weight_buf: Pointer(global int8), int8, [1048576]), storage_scope = global;
  allocate(res_gem: Pointer(global int32), int32, [1024]), storage_scope = global {
    for (i1: int32, 0, 64) {
      for (i3: int32, 0, 16) {
        data_buf[((i1*16) + i3)] = (int8*)data_2[((i1*16) + i3)]
      }
    }
    for (i0: int32, 0, 64) {
      for (i1_1: int32, 0, 64) {
        for (i2: int32, 0, 16) {
          for (i3_1: int32, 0, 16) {
            weight_buf[((((i0*16384) + (i1_1*256)) + (i2*16)) + i3_1)] = (int8*)weight_2[((((i0*16384) + (i1_1*256)) + (i2*16)) + i3_1)]
          }
        }
      }
    }
    for (co: int32, 0, 64) {
      for (ci: int32, 0, 16) {
        res_gem[((co*16) + ci)] = 0
        for (ic: int32, 0, 64) {
          for (ic_tns: int32, 0, 16) {
            res_gem[((co*16) + ci)] = ((int32*)res_gem[((co*16) + ci)] + (cast(int32, (int8*)data_buf[((ic*16) + ic_tns)])*cast(int32, (int8*)weight_buf[((((co*16384) + (ic*256)) + (ci*16)) + ic_tns)])))
          }
        }
      }
    }
    for (i1_2: int32, 0, 64) {
      for (i3_2: int32, 0, 16) {
        res_gem[((i1_2*16) + i3_2)] = @tir.shift_right((int32*)res_gem[((i1_2*16) + i3_2)], 8, dtype=int32)
      }
    }
    for (i1_3: int32, 0, 64) {
      for (i3_3: int32, 0, 16) {
        res_gem[((i1_3*16) + i3_3)] = max((int32*)res_gem[((i1_3*16) + i3_3)], 0)
      }
    }
    for (i1_4: int32, 0, 64) {
      for (i3_4: int32, 0, 16) {
        res_gem[((i1_4*16) + i3_4)] = min((int32*)res_gem[((i1_4*16) + i3_4)], 127)
      }
    }
    for (i1_5: int32, 0, 64) {
      for (i3_5: int32, 0, 16) {
        res_2[((i1_5*16) + i3_5)] = cast(int8, (int32*)res_gem[((i1_5*16) + i3_5)])
      }
    }
  }
}
</pre></div>
</div>
<div class="section" id="blocking-the-computation">
<h3>Blocking the Computation<a class="headerlink" href="#blocking-the-computation" title="永久链接至标题">¶</a></h3>
<p>The matrix multiplication is by default too large for activations or weights
to fit on VTA’s on-chip buffers all at once.
We block the (1, 1024) by (1024, 1024) matrix multiplication into
smaller (1, 256) by (256, 256) matrix multiplications so the intermediate
tensors can fit on the accelerator’s on-chip SRAM.
This approach is similar to blocking techniques applied to CPUs and GPUs in
order to increase cache hit rate.</p>
<p>We perform blocking along each axis (the batch axis being untouched since
we are performing single-batch inference).
We also leave the inner-most tensorization axes as-is in order to allow
TVM to pattern-match tensorization.
We show the outcome of blocking on the computation schedule in the diagram
below:</p>
<a class="reference internal image-reference" href="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/blocking.png"><img alt="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/blocking.png" class="align-center" src="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/blocking.png" style="width: 480px;" /></a>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>The code after loop splitting and reordering is equivalent to the following
pseudo-code. We ignore the batch axis since we are only performing single-batch
inference in this example:</p>
<div class="highlight-c notranslate"><div class="highlight"><pre><span></span><span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">oc_out</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">oc_out</span> <span class="o">&lt;</span> <span class="mi">4</span><span class="p">;</span> <span class="o">++</span><span class="n">oc_out</span><span class="p">)</span> <span class="p">{</span>
  <span class="c1">// Initialization loop</span>
  <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">oc_inn</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">oc_inn</span> <span class="o">&lt;</span> <span class="mi">16</span><span class="p">;</span> <span class="o">++</span><span class="n">oc_inn</span><span class="p">)</span> <span class="p">{</span>
   <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">oc_tns</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">oc_tns</span> <span class="o">&lt;</span> <span class="mi">16</span><span class="p">;</span> <span class="o">++</span><span class="n">oc_tns</span><span class="p">)</span> <span class="p">{</span>
    <span class="kt">int</span> <span class="n">j</span> <span class="o">=</span> <span class="p">(</span><span class="n">oc_out</span> <span class="o">*</span> <span class="mi">16</span> <span class="o">+</span> <span class="n">oc_inn</span><span class="p">)</span> <span class="o">*</span> <span class="mi">16</span> <span class="o">+</span> <span class="n">oc_tns</span><span class="p">;</span>
    <span class="n">C</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="n">j</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span>
   <span class="p">}</span>
  <span class="p">}</span>
  <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">ic_out</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">ic_out</span> <span class="o">&lt;</span> <span class="mi">4</span><span class="p">;</span> <span class="o">++</span><span class="n">ic_out</span><span class="p">)</span> <span class="p">{</span>
   <span class="c1">// Block loop</span>
   <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">oc_inn</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">oc_inn</span> <span class="o">&lt;</span> <span class="mi">16</span><span class="p">;</span> <span class="o">++</span><span class="n">oc_inn</span><span class="p">)</span> <span class="p">{</span>
    <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">ic_inn</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">ic_inn</span> <span class="o">&lt;</span> <span class="mi">16</span><span class="p">;</span> <span class="o">++</span><span class="n">ic_inn</span><span class="p">)</span> <span class="p">{</span>
     <span class="c1">// Tensorization loop</span>
     <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">oc_tns</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">oc_tns</span> <span class="o">&lt;</span> <span class="mi">16</span><span class="p">;</span> <span class="o">++</span><span class="n">oc_tns</span><span class="p">)</span> <span class="p">{</span>
      <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">ic_tns</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">ic_tns</span> <span class="o">&lt;</span> <span class="mi">16</span><span class="p">;</span> <span class="o">++</span><span class="n">ic_tns</span><span class="p">)</span> <span class="p">{</span>
       <span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="p">(</span><span class="n">ic_out</span> <span class="o">*</span> <span class="mi">16</span> <span class="o">+</span> <span class="n">ic_inn</span><span class="p">)</span> <span class="o">*</span> <span class="mi">16</span> <span class="o">+</span> <span class="n">ic_tns</span><span class="p">;</span>
       <span class="kt">int</span> <span class="n">j</span> <span class="o">=</span> <span class="p">(</span><span class="n">oc_out</span> <span class="o">*</span> <span class="mi">16</span> <span class="o">+</span> <span class="n">oc_inn</span><span class="p">)</span> <span class="o">*</span> <span class="mi">16</span> <span class="o">+</span> <span class="n">oc_tns</span><span class="p">;</span>
       <span class="n">C</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="n">j</span><span class="p">]</span> <span class="o">=</span> <span class="n">C</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="n">j</span><span class="p">]</span> <span class="o">+</span> <span class="n">A</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="n">i</span><span class="p">]</span> <span class="o">*</span> <span class="n">B</span><span class="p">[</span><span class="n">j</span><span class="p">][</span><span class="n">i</span><span class="p">];</span>
      <span class="p">}</span>
     <span class="p">}</span>
    <span class="p">}</span>
   <span class="p">}</span>
  <span class="p">}</span>
 <span class="p">}</span>
<span class="p">}</span>
</pre></div>
</div>
</div>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Let&#39;s define tiling sizes (expressed in multiples of VTA tensor shape size)</span>
<span class="n">b_block</span> <span class="o">=</span> <span class="mi">1</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span>
<span class="n">i_block</span> <span class="o">=</span> <span class="mi">256</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span>
<span class="n">o_block</span> <span class="o">=</span> <span class="mi">256</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span>

<span class="c1"># Tile the output tensor along the batch and output channel dimensions</span>
<span class="c1"># (since by default we are doing single batch inference, the split along</span>
<span class="c1">#  the batch dimension has no effect)</span>
<span class="n">b</span><span class="p">,</span> <span class="n">oc</span><span class="p">,</span> <span class="n">b_tns</span><span class="p">,</span> <span class="n">oc_tns</span> <span class="o">=</span> <span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span>
<span class="n">b_out</span><span class="p">,</span> <span class="n">b_inn</span> <span class="o">=</span> <span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">]</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">b</span><span class="p">,</span> <span class="n">b_block</span><span class="p">)</span>
<span class="n">oc_out</span><span class="p">,</span> <span class="n">oc_inn</span> <span class="o">=</span> <span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">]</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">oc</span><span class="p">,</span> <span class="n">o_block</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">]</span><span class="o">.</span><span class="n">reorder</span><span class="p">(</span><span class="n">b_out</span><span class="p">,</span> <span class="n">oc_out</span><span class="p">,</span> <span class="n">b_inn</span><span class="p">,</span> <span class="n">oc_inn</span><span class="p">)</span>

<span class="c1"># Move intermediate computation into each output compute tile</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">]</span><span class="o">.</span><span class="n">compute_at</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">],</span> <span class="n">oc_out</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_shr</span><span class="p">]</span><span class="o">.</span><span class="n">compute_at</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">],</span> <span class="n">oc_out</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_max</span><span class="p">]</span><span class="o">.</span><span class="n">compute_at</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">],</span> <span class="n">oc_out</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_min</span><span class="p">]</span><span class="o">.</span><span class="n">compute_at</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">],</span> <span class="n">oc_out</span><span class="p">)</span>

<span class="c1"># Apply additional loop split along reduction axis (input channel)</span>
<span class="n">b_inn</span><span class="p">,</span> <span class="n">oc_inn</span><span class="p">,</span> <span class="n">b_tns</span><span class="p">,</span> <span class="n">oc_tns</span> <span class="o">=</span> <span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span>
<span class="n">ic_out</span><span class="p">,</span> <span class="n">ic_inn</span> <span class="o">=</span> <span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">]</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">ic</span><span class="p">,</span> <span class="n">i_block</span><span class="p">)</span>

<span class="c1"># Reorder axes. We move the ic_out axis all the way out of the GEMM</span>
<span class="c1"># loop to block along the reduction axis</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">]</span><span class="o">.</span><span class="n">reorder</span><span class="p">(</span><span class="n">ic_out</span><span class="p">,</span> <span class="n">b_inn</span><span class="p">,</span> <span class="n">oc_inn</span><span class="p">,</span> <span class="n">ic_inn</span><span class="p">,</span> <span class="n">b_tns</span><span class="p">,</span> <span class="n">oc_tns</span><span class="p">,</span> <span class="n">ic_tns</span><span class="p">)</span>

<span class="c1"># Let&#39;s look at the current TVM schedule after blocking</span>
<span class="nb">print</span><span class="p">(</span><span class="n">tvm</span><span class="o">.</span><span class="n">lower</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">data</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">res</span><span class="p">],</span> <span class="n">simple_mode</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>primfn(data_1: handle, weight_1: handle, res_1: handle) -&gt; ()
  attr = {&quot;from_legacy_te_schedule&quot;: True, &quot;global_symbol&quot;: &quot;main&quot;, &quot;tir.noalias&quot;: True}
  buffers = {res: Buffer(res_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             data: Buffer(data_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             weight: Buffer(weight_2: Pointer(int8), int8, [64, 64, 16, 16], [])}
  buffer_map = {data_1: data, weight_1: weight, res_1: res} {
  allocate(data_buf: Pointer(global int8), int8, [1024]), storage_scope = global;
  allocate(weight_buf: Pointer(global int8), int8, [1048576]), storage_scope = global;
  allocate(res_gem: Pointer(global int32), int32, [256]), storage_scope = global {
    for (i1: int32, 0, 64) {
      for (i3: int32, 0, 16) {
        data_buf[((i1*16) + i3)] = (int8*)data_2[((i1*16) + i3)]
      }
    }
    for (i0: int32, 0, 64) {
      for (i1_1: int32, 0, 64) {
        for (i2: int32, 0, 16) {
          for (i3_1: int32, 0, 16) {
            weight_buf[((((i0*16384) + (i1_1*256)) + (i2*16)) + i3_1)] = (int8*)weight_2[((((i0*16384) + (i1_1*256)) + (i2*16)) + i3_1)]
          }
        }
      }
    }
    for (i1.outer: int32, 0, 4) {
      for (co.init: int32, 0, 16) {
        for (ci.init: int32, 0, 16) {
          res_gem[((co.init*16) + ci.init)] = 0
        }
      }
      for (ic.outer: int32, 0, 4) {
        for (co: int32, 0, 16) {
          for (ic.inner: int32, 0, 16) {
            for (ci: int32, 0, 16) {
              for (ic_tns: int32, 0, 16) {
                res_gem[((co*16) + ci)] = ((int32*)res_gem[((co*16) + ci)] + (cast(int32, (int8*)data_buf[(((ic.outer*256) + (ic.inner*16)) + ic_tns)])*cast(int32, (int8*)weight_buf[((((((i1.outer*262144) + (co*16384)) + (ic.outer*4096)) + (ic.inner*256)) + (ci*16)) + ic_tns)])))
              }
            }
          }
        }
      }
      for (i1_2: int32, 0, 16) {
        for (i3_2: int32, 0, 16) {
          res_gem[((i1_2*16) + i3_2)] = @tir.shift_right((int32*)res_gem[((i1_2*16) + i3_2)], 8, dtype=int32)
        }
      }
      for (i1_3: int32, 0, 16) {
        for (i3_3: int32, 0, 16) {
          res_gem[((i1_3*16) + i3_3)] = max((int32*)res_gem[((i1_3*16) + i3_3)], 0)
        }
      }
      for (i1_4: int32, 0, 16) {
        for (i3_4: int32, 0, 16) {
          res_gem[((i1_4*16) + i3_4)] = min((int32*)res_gem[((i1_4*16) + i3_4)], 127)
        }
      }
      for (i1.inner: int32, 0, 16) {
        for (i3_5: int32, 0, 16) {
          res_2[(((i1.outer*256) + (i1.inner*16)) + i3_5)] = cast(int8, (int32*)res_gem[((i1.inner*16) + i3_5)])
        }
      }
    }
  }
}
</pre></div>
</div>
</div>
<div class="section" id="lowering-copies-to-dma-transfers">
<h3>Lowering Copies to DMA Transfers<a class="headerlink" href="#lowering-copies-to-dma-transfers" title="永久链接至标题">¶</a></h3>
<p>Next we set the buffer scopes to the corresponding on-chip VTA SRAM buffers.
We move the load loops into the matrix multiply computation loop to stage
memory loads such that they fit in the on-chip SRAM buffers.
Finally we annotate the load/store loop outer axes with the DMA copy pragma
to perform bulk memory transfers on VTA.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Set scope of SRAM buffers</span>
<span class="n">s</span><span class="p">[</span><span class="n">data_buf</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">inp_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">weight_buf</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">wgt_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_shr</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_min</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_max</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>

<span class="c1"># Block data and weight cache reads</span>
<span class="n">s</span><span class="p">[</span><span class="n">data_buf</span><span class="p">]</span><span class="o">.</span><span class="n">compute_at</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">],</span> <span class="n">ic_out</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">weight_buf</span><span class="p">]</span><span class="o">.</span><span class="n">compute_at</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">],</span> <span class="n">ic_out</span><span class="p">)</span>

<span class="c1"># Use DMA copy pragma on DRAM-&gt;SRAM operations</span>
<span class="n">s</span><span class="p">[</span><span class="n">data_buf</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">data_buf</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">dma_copy</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">weight_buf</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">weight_buf</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">dma_copy</span><span class="p">)</span>

<span class="c1"># Use DMA copy pragma on SRAM-&gt;DRAM operation</span>
<span class="c1"># (this implies that these copies should be performed along b_inn,</span>
<span class="c1"># or result axis 2)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">2</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">dma_copy</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="lowering-computation-to-vta-compute-intrinsics">
<h3>Lowering Computation to VTA Compute Intrinsics<a class="headerlink" href="#lowering-computation-to-vta-compute-intrinsics" title="永久链接至标题">¶</a></h3>
<p>The last phase is to lower the computation loops down to VTA hardware
intrinsics by mapping the matrix multiplication to tensor intrinsics,
and mapping the shift, and clipping computation to the vector ALU.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Apply tensorization over the batch tensor tile axis</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_gemm</span><span class="p">]</span><span class="o">.</span><span class="n">tensorize</span><span class="p">(</span><span class="n">b_tns</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">gemm</span><span class="p">)</span>

<span class="c1"># Add an ALU pragma over the shift and clipping operations</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_shr</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res_shr</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">alu</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_min</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res_min</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">alu</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">res_max</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">res_max</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">alu</span><span class="p">)</span>

<span class="c1"># Let&#39;s look at the final lowered TVM schedule after lowering memory</span>
<span class="c1"># loads/stores down to DMA copy intrinsics, and the computation down to</span>
<span class="c1"># VTA compute intrinsics.</span>
<span class="nb">print</span><span class="p">(</span><span class="n">vta</span><span class="o">.</span><span class="n">lower</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">data</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">res</span><span class="p">],</span> <span class="n">simple_mode</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>primfn(data_1: handle, weight_1: handle, res_1: handle) -&gt; ()
  attr = {&quot;from_legacy_te_schedule&quot;: True, &quot;global_symbol&quot;: &quot;main&quot;, &quot;tir.noalias&quot;: True}
  buffers = {res: Buffer(res_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             data: Buffer(data_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             weight: Buffer(weight_2: Pointer(int8), int8, [64, 64, 16, 16], [])}
  buffer_map = {data_1: data, weight_1: weight, res_1: res} {
  @tir.vta.coproc_dep_push(3, 2, dtype=int32)
  for (i1.outer: int32, 0, 4) {
    attr [IterVar(vta: int32, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 2 {
      @tir.vta.coproc_dep_pop(3, 2, dtype=int32)
      attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_uop_scope&quot; = &quot;VTAPushGEMMOp&quot; {
        @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 16, 1, 0, 0, dtype=int32)
        @tir.vta.uop_push(0, 1, 0, 0, 0, 0, 0, 0, dtype=int32)
        @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
      }
      @tir.vta.coproc_dep_push(2, 1, dtype=int32)
    }
    for (ic.outer: int32, 0, 4) {
      attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 1 {
        @tir.vta.coproc_dep_pop(2, 1, dtype=int32)
        @tir.call_extern(&quot;VTALoadBuffer2D&quot;, @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), data_2, (ic.outer*16), 16, 1, 16, 0, 0, 0, 0, 0, 2, dtype=int32)
        @tir.call_extern(&quot;VTALoadBuffer2D&quot;, @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), weight_2, ((i1.outer*1024) + (ic.outer*16)), 16, 16, 64, 0, 0, 0, 0, 0, 1, dtype=int32)
        @tir.vta.coproc_dep_push(1, 2, dtype=int32)
      }
      attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 2 {
        @tir.vta.coproc_dep_pop(1, 2, dtype=int32)
        attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_uop_scope&quot; = &quot;VTAPushGEMMOp&quot; {
          @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 16, 1, 0, 16, dtype=int32)
          @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 16, 0, 1, 1, dtype=int32)
          @tir.vta.uop_push(0, 0, 0, 0, 0, 0, 0, 0, dtype=int32)
          @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
          @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
        }
        @tir.vta.coproc_dep_push(2, 1, dtype=int32)
      }
    }
    @tir.vta.coproc_dep_pop(2, 1, dtype=int32)
    attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 2 {
      attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_uop_scope&quot; = &quot;VTAPushALUOp&quot; {
        @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 16, 1, 1, 0, dtype=int32)
        @tir.vta.uop_push(1, 0, 0, 0, 0, 3, 1, 8, dtype=int32)
        @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
      }
      attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_uop_scope&quot; = &quot;VTAPushALUOp&quot; {
        @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 16, 1, 1, 0, dtype=int32)
        @tir.vta.uop_push(1, 0, 0, 0, 0, 1, 1, 0, dtype=int32)
        @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
      }
      attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_uop_scope&quot; = &quot;VTAPushALUOp&quot; {
        @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 16, 1, 1, 0, dtype=int32)
        @tir.vta.uop_push(1, 0, 0, 0, 0, 0, 1, 127, dtype=int32)
        @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
      }
      @tir.vta.coproc_dep_push(2, 3, dtype=int32)
    }
    attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 3 {
      @tir.vta.coproc_dep_pop(2, 3, dtype=int32)
      for (i1.inner: int32, 0, 16) {
        @tir.call_extern(&quot;VTAStoreBuffer2D&quot;, @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), i1.inner, 4, res_2, ((i1.outer*16) + i1.inner), 1, 1, 1, dtype=int32)
      }
      @tir.vta.coproc_dep_push(3, 2, dtype=int32)
    }
  }
  @tir.vta.coproc_sync(, dtype=int32)
  @tir.vta.coproc_dep_pop(3, 2, dtype=int32)
}
</pre></div>
</div>
</div>
</div>
<div class="section" id="tvm-compilation-and-verification">
<h2>TVM Compilation and Verification<a class="headerlink" href="#tvm-compilation-and-verification" title="永久链接至标题">¶</a></h2>
<p>After specifying the schedule, we can compile it into a TVM function.
We save the module so we can send it over RPC.
We run the function and verify it against a numpy implementation to
ensure correctness.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Compile the TVM module</span>
<span class="n">my_gemm</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">data</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">res</span><span class="p">],</span> <span class="s2">&quot;ext_dev&quot;</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">target_host</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;my_gemm&quot;</span><span class="p">)</span>
<span class="n">temp</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">tempdir</span><span class="p">()</span>
<span class="n">my_gemm</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;gemm.o&quot;</span><span class="p">))</span>
<span class="n">remote</span><span class="o">.</span><span class="n">upload</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;gemm.o&quot;</span><span class="p">))</span>
<span class="n">f</span> <span class="o">=</span> <span class="n">remote</span><span class="o">.</span><span class="n">load_module</span><span class="p">(</span><span class="s2">&quot;gemm.o&quot;</span><span class="p">)</span>

<span class="c1"># Get the remote device context</span>
<span class="n">ctx</span> <span class="o">=</span> <span class="n">remote</span><span class="o">.</span><span class="n">ext_dev</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>

<span class="c1"># Initialize the data and weight arrays randomly in the int range of (-128, 128]</span>
<span class="n">data_np</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="o">-</span><span class="mi">128</span><span class="p">,</span> <span class="mi">128</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">in_channels</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">data</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
<span class="n">weight_np</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="o">-</span><span class="mi">128</span><span class="p">,</span> <span class="mi">128</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">in_channels</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>

<span class="c1"># Apply packing to the data and weight arrays from a 2D to a 4D packed layout</span>
<span class="n">data_packed</span> <span class="o">=</span> <span class="n">data_np</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span>
    <span class="n">batch_size</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span>
<span class="p">)</span><span class="o">.</span><span class="n">transpose</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="n">weight_packed</span> <span class="o">=</span> <span class="n">weight_np</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span>
    <span class="n">out_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span>
<span class="p">)</span><span class="o">.</span><span class="n">transpose</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>

<span class="c1"># Format the input/output arrays with tvm.nd.array to the DLPack standard</span>
<span class="n">data_nd</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">data_packed</span><span class="p">,</span> <span class="n">ctx</span><span class="p">)</span>
<span class="n">weight_nd</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">weight_packed</span><span class="p">,</span> <span class="n">ctx</span><span class="p">)</span>
<span class="n">res_nd</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">output_shape</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">res</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="n">ctx</span><span class="p">)</span>

<span class="c1"># Clear stats</span>
<span class="k">if</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;sim&quot;</span><span class="p">,</span> <span class="s2">&quot;tsim&quot;</span><span class="p">]:</span>
    <span class="n">simulator</span><span class="o">.</span><span class="n">clear_stats</span><span class="p">()</span>

<span class="c1"># Invoke the module to perform the computation</span>
<span class="n">f</span><span class="p">(</span><span class="n">data_nd</span><span class="p">,</span> <span class="n">weight_nd</span><span class="p">,</span> <span class="n">res_nd</span><span class="p">)</span>

<span class="c1"># Verify against numpy implementation</span>
<span class="n">res_ref</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">dot</span><span class="p">(</span><span class="n">data_np</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">),</span> <span class="n">weight_np</span><span class="o">.</span><span class="n">T</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">))</span>
<span class="n">res_ref</span> <span class="o">=</span> <span class="n">res_ref</span> <span class="o">&gt;&gt;</span> <span class="n">env</span><span class="o">.</span><span class="n">INP_WIDTH</span>
<span class="n">res_ref</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">clip</span><span class="p">(</span><span class="n">res_ref</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">inp_max</span><span class="p">)</span>
<span class="n">res_ref</span> <span class="o">=</span> <span class="n">res_ref</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">res</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
<span class="n">res_ref</span> <span class="o">=</span> <span class="n">res_ref</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span>
    <span class="n">batch_size</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">out_channels</span> <span class="o">//</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span>
<span class="p">)</span><span class="o">.</span><span class="n">transpose</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="n">np</span><span class="o">.</span><span class="n">testing</span><span class="o">.</span><span class="n">assert_equal</span><span class="p">(</span><span class="n">res_ref</span><span class="p">,</span> <span class="n">res_nd</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>

<span class="c1"># Print stats</span>
<span class="k">if</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;sim&quot;</span><span class="p">,</span> <span class="s2">&quot;tsim&quot;</span><span class="p">]:</span>
    <span class="n">sim_stats</span> <span class="o">=</span> <span class="n">simulator</span><span class="o">.</span><span class="n">stats</span><span class="p">()</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Execution statistics:&quot;</span><span class="p">)</span>
    <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">sim_stats</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;</span><span class="se">\t</span><span class="si">{:&lt;16}</span><span class="s2">: </span><span class="si">{:&gt;16}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">v</span><span class="p">))</span>

<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Successful blocked matrix multiply test!&quot;</span><span class="p">)</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>Execution statistics:
        inp_load_nbytes :             4096
        wgt_load_nbytes :          1048576
        acc_load_nbytes :                0
        uop_load_nbytes :               20
        out_store_nbytes:             1024
        gemm_counter    :             4096
        alu_counter     :              192
Successful blocked matrix multiply test!
</pre></div>
</div>
</div>
<div class="section" id="summary">
<h2>总结<a class="headerlink" href="#summary" title="永久链接至标题">¶</a></h2>
<p>This tutorial demonstrates how TVM scheduling primitives can achieve
computation blocking for a matrix multiplication example.
This allows us to map arbitrarily large computation onto limited
hardware accelerator resources.</p>
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-topic-vta-tutorials-optimize-matrix-multiply-opt-py">
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../../../_downloads/822e9d945c0bbf1cf23fc4f53c1b7906/matrix_multiply_opt.py"><code class="xref download docutils literal notranslate"><span class="pre">Python</span> <span class="pre">源码下载:</span> <span class="pre">matrix_multiply_opt.py</span></code></a></p>
</div>
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../../../_downloads/4d3f955a709b320db0d42740fead8ac1/matrix_multiply_opt.ipynb"><code class="xref download docutils literal notranslate"><span class="pre">Jupyter</span> <span class="pre">notebook</span> <span class="pre">下载:</span> <span class="pre">matrix_multiply_opt.ipynb</span></code></a></p>
</div>
</div>
<p class="sphx-glr-signature"><a class="reference external" href="https://sphinx-gallery.github.io">Gallery generated by Sphinx-Gallery</a></p>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../autotvm/tune_alu_vta.html" class="btn btn-neutral float-right" title="Auto-tuning an ALU fused op on VTA" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="convolution_opt.html" class="btn btn-neutral float-left" title="2D Convolution Optimization" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../../../_static/img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <!-- note: stray duplicate </body> removed here; the document is closed once, after the trailing scripts -->
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>