





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>从 VTA 开始 &mdash; tvm 0.8.dev1982 文档</title>

  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <!-- Deduplicated: theme.css and pygments.css were each linked twice -->
  <link rel="stylesheet" href="../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../_static/pygments.css">
  <link rel="stylesheet" href="../../../_static/gallery.css">
  <link rel="stylesheet" href="../../../_static/css/tlcpack_theme.css">

  <link rel="shortcut icon" href="../../../_static/tvm-logo-square.png">

  <!-- Deduplicated: documentation_options.js was loaded twice with the same id -->
  <script id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script>
  <script src="../../../_static/jquery.js"></script>
  <script src="../../../_static/underscore.js"></script>
  <script src="../../../_static/doctools.js"></script>
  <script src="../../../_static/translations.js"></script>
  <script src="../../../_static/js/theme.js"></script>
  <script src="../../../_static/js/tlcpack_theme.js"></script>

  <link rel="index" title="索引" href="../../../genindex.html">
  <link rel="search" title="搜索" href="../../../search.html">
  <link rel="next" title="Deploy Pretrained Vision Model from MxNet on VTA" href="frontend/deploy_classification.html">
  <link rel="prev" title="Simple Matrix Multiply" href="matrix_multiply.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <!-- Attribute values quoted throughout this header (they were unquoted) -->
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <!-- Mobile (responsive) ASF dropdown -->
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <!-- Desktop ASF dropdown (same links as the responsive variant above) -->
            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">

          <a href="../../../index.html">
            <img src="../../../_static/tvm-logo-small.png" class="logo" alt="Logo">
          </a>

          <div class="version">
            0.8.dev1982
          </div>

<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
    <!-- aria-label added: the placeholder alone is not an accessible name for the search field -->
    <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
    <input type="hidden" name="check_keywords" value="yes">
    <input type="hidden" name="area" value="default">
  </form>
</div>

        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
              <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../install/index.html">安装 TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../contribute/index.html">贡献者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../tutorial/index.html">User Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../how_to/index.html">How To Guides</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../dev/tutorial/index.html">Developer Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../dev/how_to/how_to.html">开发者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../arch/index.html">Design and Architecture</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../microtvm/index.html">microTVM：裸机使用TVM</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">VTA: Versatile Tensor Accelerator</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../install.html">VTA安装指南</a></li>
<li class="toctree-l2"><a class="reference internal" href="../dev/index.html">VTA设计与开发指南</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="index.html">VTA教程</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="matrix_multiply.html">Simple Matrix Multiply</a></li>
<li class="toctree-l3 current"><a class="current reference internal" href="#">从 VTA 开始</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#loading-in-vta-parameters">Loading in VTA Parameters</a></li>
<li class="toctree-l4"><a class="reference internal" href="#input-placeholders">Input Placeholders</a></li>
<li class="toctree-l4"><a class="reference internal" href="#copy-buffers">Copy Buffers</a></li>
<li class="toctree-l4"><a class="reference internal" href="#vector-addition">Vector Addition</a></li>
<li class="toctree-l4"><a class="reference internal" href="#casting-the-results">Casting the Results</a></li>
<li class="toctree-l4"><a class="reference internal" href="#default-schedule">Default Schedule</a></li>
<li class="toctree-l4"><a class="reference internal" href="#buffer-scopes">Buffer Scopes</a></li>
<li class="toctree-l4"><a class="reference internal" href="#dma-transfers">DMA Transfers</a></li>
<li class="toctree-l4"><a class="reference internal" href="#alu-operations">ALU Operations</a></li>
<li class="toctree-l4"><a class="reference internal" href="#saving-the-module">Saving the Module</a></li>
<li class="toctree-l4"><a class="reference internal" href="#loading-the-module">Loading the Module</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="index.html#compile-deep-learning-models">编译深度学习模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="index.html#optimize-tensor-operators">优化张量算子</a></li>
<li class="toctree-l3"><a class="reference internal" href="index.html#auto-tuning">自动调整</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#literature">Literature</a></li>
</ul>
</li>
</ul>
<p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/langref/index.html">语言参考</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/api/python/index.html">Python API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/api/links.html">Other APIs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../reference/publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../genindex.html">索引</a></li>
</ul>

        </div>

      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              Table of contents
            </div>
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    <!-- Raw ">" separators escaped as &gt; -->
    <li><a href="../../../index.html">Docs</a> <span class="br-arrow">&gt;</span></li>
    <li><a href="../index.html">VTA: Versatile Tensor Accelerator</a> <span class="br-arrow">&gt;</span></li>
    <li><a href="index.html">VTA教程</a> <span class="br-arrow">&gt;</span></li>
    <li>从 VTA 开始</li>
    <li class="wy-breadcrumbs-aside">
      <!-- Fixed doubled slash in the icon path (_static//img -> _static/img) -->
      <a href="../../../_sources/topic/vta/tutorials/vta_get_started.rst.txt" rel="nofollow"> <img src="../../../_static/img/source.svg" alt="View page source"></a>
    </li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="sphx-glr-download-link-note admonition note">
<p class="admonition-title">注解</p>
<p>Click <a class="reference internal" href="#sphx-glr-download-topic-vta-tutorials-vta-get-started-py"><span class="std std-ref">here</span></a> to download the full example code</p>
</div>
<div class="sphx-glr-example-title section" id="get-started-with-vta">
<span id="vta-get-started"></span><span id="sphx-glr-topic-vta-tutorials-vta-get-started-py"></span><h1>从 VTA 开始<a class="headerlink" href="#get-started-with-vta" title="永久链接至标题">¶</a></h1>
<p><strong>作者</strong>: <a class="reference external" href="https://homes.cs.washington.edu/~moreau/">Thierry Moreau</a></p>
<p>这是一份关于如何使用 TVM 来完成VTA编程设计的介绍教程。</p>
<p>在此教程中，我们将演示基本的 TVM 工作流程，在基于VTA 设计的向量 ALU 上执行向量加法。此过程包括特定的调度转换，这对于将计算降低至低级加速器操作是必要的。</p>
<p>首先我们需要导入 TVM ，这是我们的深度学习优化编译器。除此之外，我们还需要导入 VTA Python 包，其中包含针对 TVM 的 VTA 特定扩展，它是针对VTA设计的。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">__future__</span> <span class="k">import</span> <span class="n">absolute_import</span><span class="p">,</span> <span class="n">print_function</span>

<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">te</span>
<span class="kn">import</span> <span class="nn">vta</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
</pre></div>
</div>
<div class="section" id="loading-in-vta-parameters">
<h2>Loading in VTA Parameters<a class="headerlink" href="#loading-in-vta-parameters" title="永久链接至标题">¶</a></h2>
<p>VTA 是一种模块化、可定制化的设计。因此，用户可以自由修改影响硬件设计布局的高级别硬件参数，它们在 <code class="code docutils literal notranslate"><span class="pre">vta_config.json</span></code> 文件中通过 <code class="code docutils literal notranslate"><span class="pre">log2</span></code> 的值来指定。另外，VTA 参数可以通过函数 <code class="code docutils literal notranslate"><span class="pre">vta.get_env</span></code> 进行加载。</p>
<p>最后，TVM 的目标也在 <code class="code docutils literal notranslate"><span class="pre">vta_config.json</span></code> 文件中指定。设置为 <em>sim</em> 时，将在 VTA 行为模拟器内执行。如果您想在 Pynq FPGA 开发平台上运行此教程，请参照 <em>VTA Pynq-Based Testing Setup</em> 指南。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">env</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">get_env</span><span class="p">()</span>
</pre></div>
</div>
<div class="section" id="fpga-programming">
<h3>FPGA 编程<a class="headerlink" href="#fpga-programming" title="永久链接至标题">¶</a></h3>
<p>When targeting the Pynq FPGA development board, we need to configure
the board with a VTA bitstream.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># We&#39;ll need the TVM RPC module and the VTA simulator module</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">rpc</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="k">import</span> <span class="n">utils</span>
<span class="kn">from</span> <span class="nn">vta.testing</span> <span class="k">import</span> <span class="n">simulator</span>

<span class="c1"># We read the Pynq RPC host IP address and port number from the OS environment</span>
<span class="n">host</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;VTA_RPC_HOST&quot;</span><span class="p">,</span> <span class="s2">&quot;192.168.2.99&quot;</span><span class="p">)</span>
<span class="n">port</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;VTA_RPC_PORT&quot;</span><span class="p">,</span> <span class="s2">&quot;9091&quot;</span><span class="p">))</span>

<span class="c1"># We configure both the bitstream and the runtime system on the Pynq</span>
<span class="c1"># to match the VTA configuration specified by the vta_config.json file.</span>
<span class="k">if</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="o">==</span> <span class="s2">&quot;pynq&quot;</span> <span class="ow">or</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="o">==</span> <span class="s2">&quot;de10nano&quot;</span><span class="p">:</span>

    <span class="c1"># Make sure that TVM was compiled with RPC=1</span>
    <span class="k">assert</span> <span class="n">tvm</span><span class="o">.</span><span class="n">runtime</span><span class="o">.</span><span class="n">enabled</span><span class="p">(</span><span class="s2">&quot;rpc&quot;</span><span class="p">)</span>
    <span class="n">remote</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">connect</span><span class="p">(</span><span class="n">host</span><span class="p">,</span> <span class="n">port</span><span class="p">)</span>

    <span class="c1"># Reconfigure the JIT runtime</span>
    <span class="n">vta</span><span class="o">.</span><span class="n">reconfig_runtime</span><span class="p">(</span><span class="n">remote</span><span class="p">)</span>

    <span class="c1"># Program the FPGA with a pre-compiled VTA bitstream.</span>
    <span class="c1"># You can program the FPGA with your own custom bitstream</span>
    <span class="c1"># by passing the path to the bitstream file instead of None.</span>
    <span class="n">vta</span><span class="o">.</span><span class="n">program_fpga</span><span class="p">(</span><span class="n">remote</span><span class="p">,</span> <span class="n">bitstream</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>

<span class="c1"># In simulation mode, host the RPC server locally.</span>
<span class="k">elif</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="ow">in</span> <span class="p">(</span><span class="s2">&quot;sim&quot;</span><span class="p">,</span> <span class="s2">&quot;tsim&quot;</span><span class="p">,</span> <span class="s2">&quot;intelfocl&quot;</span><span class="p">):</span>
    <span class="n">remote</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">LocalSession</span><span class="p">()</span>

    <span class="k">if</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;intelfocl&quot;</span><span class="p">]:</span>
        <span class="c1"># program intelfocl aocx</span>
        <span class="n">vta</span><span class="o">.</span><span class="n">program_fpga</span><span class="p">(</span><span class="n">remote</span><span class="p">,</span> <span class="n">bitstream</span><span class="o">=</span><span class="s2">&quot;vta.bitstream&quot;</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="computation-declaration">
<h3>Computation Declaration<a class="headerlink" href="#computation-declaration" title="永久链接至标题">¶</a></h3>
<p>As a first step, we need to describe our computation.
TVM adopts tensor semantics, with each intermediate result
represented as multi-dimensional array. The user needs to describe
the computation rule that generates the output tensors.</p>
<p>In this example we describe a vector addition, which requires multiple
computation stages, as shown in the dataflow diagram below.
First we describe the input tensors <code class="code docutils literal notranslate"><span class="pre">A</span></code> and <code class="code docutils literal notranslate"><span class="pre">B</span></code> that are living
in main memory.
Second, we need to declare intermediate tensors <code class="code docutils literal notranslate"><span class="pre">A_buf</span></code> and
<code class="code docutils literal notranslate"><span class="pre">B_buf</span></code>, which will live in VTA’s on-chip buffers.
Having this extra computational stage allows us to explicitly
stage cached reads and writes.
Third, we describe the vector addition computation which will
add <code class="code docutils literal notranslate"><span class="pre">A_buf</span></code> to <code class="code docutils literal notranslate"><span class="pre">B_buf</span></code> to produce <code class="code docutils literal notranslate"><span class="pre">C_buf</span></code>.
The last operation is a cast and copy back to DRAM, into results tensor
<code class="code docutils literal notranslate"><span class="pre">C</span></code>.</p>
<img alt="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/vadd_dataflow.png" class="align-center" src="https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/vadd_dataflow.png" />
</div>
</div>
<div class="section" id="input-placeholders">
<h2>Input Placeholders<a class="headerlink" href="#input-placeholders" title="永久链接至标题">¶</a></h2>
<p>We describe the placeholder tensors <code class="code docutils literal notranslate"><span class="pre">A</span></code>, and <code class="code docutils literal notranslate"><span class="pre">B</span></code> in a tiled data
format to match the data layout requirements imposed by the VTA vector ALU.</p>
<p>For VTA’s general purpose operations such as vector adds, the tile size is
<code class="code docutils literal notranslate"><span class="pre">(env.BATCH,</span> <span class="pre">env.BLOCK_OUT)</span></code>.
The dimensions are specified in
the <code class="code docutils literal notranslate"><span class="pre">vta_config.json</span></code> configuration file and are set by default to
a (1, 16) vector.</p>
<p>In addition, A and B’s data types also needs to match the <code class="code docutils literal notranslate"><span class="pre">env.acc_dtype</span></code>
which is set by the <code class="code docutils literal notranslate"><span class="pre">vta_config.json</span></code> file to be a 32-bit integer.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Output channel factor m - total 64 x 16 = 1024 output channels</span>
<span class="n">m</span> <span class="o">=</span> <span class="mi">64</span>
<span class="c1"># Batch factor o - total 1 x 1 = 1</span>
<span class="n">o</span> <span class="o">=</span> <span class="mi">1</span>
<span class="c1"># A placeholder tensor in tiled data format</span>
<span class="n">A</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">placeholder</span><span class="p">((</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;A&quot;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">)</span>
<span class="c1"># B placeholder tensor in tiled data format</span>
<span class="n">B</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">placeholder</span><span class="p">((</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;B&quot;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="copy-buffers">
<h2>Copy Buffers<a class="headerlink" href="#copy-buffers" title="永久链接至标题">¶</a></h2>
<p>One specificity of hardware accelerators, is that on-chip memory has to be
explicitly managed.
This means that we’ll need to describe intermediate tensors <code class="code docutils literal notranslate"><span class="pre">A_buf</span></code>
and <code class="code docutils literal notranslate"><span class="pre">B_buf</span></code> that can have a different memory scope than the original
placeholder tensors <code class="code docutils literal notranslate"><span class="pre">A</span></code> and <code class="code docutils literal notranslate"><span class="pre">B</span></code>.</p>
<p>Later in the scheduling phase, we can tell the compiler that <code class="code docutils literal notranslate"><span class="pre">A_buf</span></code>
and <code class="code docutils literal notranslate"><span class="pre">B_buf</span></code> will live in the VTA’s on-chip buffers (SRAM), while
<code class="code docutils literal notranslate"><span class="pre">A</span></code> and <code class="code docutils literal notranslate"><span class="pre">B</span></code> will live in main memory (DRAM).
We describe A_buf and B_buf as the result of a compute
operation that is the identity function.
This can later be interpreted by the compiler as a cached read operation.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># A copy buffer</span>
<span class="n">A_buf</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">((</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">),</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">A</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="s2">&quot;A_buf&quot;</span><span class="p">)</span>
<span class="c1"># B copy buffer</span>
<span class="n">B_buf</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">((</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">),</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">B</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="s2">&quot;B_buf&quot;</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="vector-addition">
<h2>Vector Addition<a class="headerlink" href="#vector-addition" title="永久链接至标题">¶</a></h2>
<p>Now we’re ready to describe the vector addition result tensor <code class="code docutils literal notranslate"><span class="pre">C</span></code>,
with another compute operation.
The compute function takes the shape of the tensor, as well as a lambda
function that describes the computation rule for each position of the tensor.</p>
<p>No computation happens during this phase, as we are only declaring how
the computation should be done.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Describe the in-VTA vector addition</span>
<span class="n">C_buf</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span>
    <span class="p">(</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">),</span>
    <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">A_buf</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">)</span> <span class="o">+</span> <span class="n">B_buf</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">),</span>
    <span class="n">name</span><span class="o">=</span><span class="s2">&quot;C_buf&quot;</span><span class="p">,</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="casting-the-results">
<h2>Casting the Results<a class="headerlink" href="#casting-the-results" title="永久链接至标题">¶</a></h2>
<p>After the computation is done, we’ll need to send the results computed by VTA
back to main memory.</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p><strong>Memory Store Restrictions</strong></p>
<p>One specificity of VTA is that it only supports DRAM stores in the narrow
<code class="code docutils literal notranslate"><span class="pre">env.inp_dtype</span></code> data type format.
This lets us reduce the data footprint for memory transfers (more on this
in the basic matrix multiply example).</p>
</div>
<p>We perform one last typecast operation to the narrow
input activation data format.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Cast to output type, and send to main memory</span>
<span class="n">C</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span>
    <span class="p">(</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">),</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">C_buf</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">inp_dtype</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;C&quot;</span>
<span class="p">)</span>
</pre></div>
</div>
<p>This concludes the computation declaration part of this tutorial.</p>
<div class="section" id="scheduling-the-computation">
<h3>Scheduling the Computation<a class="headerlink" href="#scheduling-the-computation" title="永久链接至标题">¶</a></h3>
<p>While the above lines describe the computation rule, we can obtain
<code class="code docutils literal notranslate"><span class="pre">C</span></code> in many ways.
TVM asks the user to provide an implementation of the computation called
<em>schedule</em>.</p>
<p>A schedule is a set of transformations to an original computation that
transforms the implementation of the computation without affecting
correctness.
This simple VTA programming tutorial aims to demonstrate basic schedule
transformations that will map the original schedule down to VTA hardware
primitives.</p>
</div>
</div>
<div class="section" id="default-schedule">
<h2>Default Schedule<a class="headerlink" href="#default-schedule" title="永久链接至标题">¶</a></h2>
<p>After we construct the schedule, by default the schedule computes
<code class="code docutils literal notranslate"><span class="pre">C</span></code> in the following way:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Let&#39;s take a look at the generated schedule</span>
<span class="n">s</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">create_schedule</span><span class="p">(</span><span class="n">C</span><span class="o">.</span><span class="n">op</span><span class="p">)</span>

<span class="nb">print</span><span class="p">(</span><span class="n">tvm</span><span class="o">.</span><span class="n">lower</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">A</span><span class="p">,</span> <span class="n">B</span><span class="p">,</span> <span class="n">C</span><span class="p">],</span> <span class="n">simple_mode</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>primfn(A_1: handle, B_1: handle, C_1: handle) -&gt; ()
  attr = {&quot;from_legacy_te_schedule&quot;: True, &quot;global_symbol&quot;: &quot;main&quot;, &quot;tir.noalias&quot;: True}
  buffers = {A: Buffer(A_2: Pointer(int32), int32, [1, 64, 1, 16], []),
             C: Buffer(C_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             B: Buffer(B_2: Pointer(int32), int32, [1, 64, 1, 16], [])}
  buffer_map = {A_1: A, B_1: B, C_1: C} {
  allocate(A_buf: Pointer(global int32), int32, [1024]), storage_scope = global;
  allocate(B_buf: Pointer(global int32), int32, [1024]), storage_scope = global {
    for (i1: int32, 0, 64) {
      for (i3: int32, 0, 16) {
        A_buf[((i1*16) + i3)] = (int32*)A_2[((i1*16) + i3)]
      }
    }
    for (i1_1: int32, 0, 64) {
      for (i3_1: int32, 0, 16) {
        B_buf[((i1_1*16) + i3_1)] = (int32*)B_2[((i1_1*16) + i3_1)]
      }
    }
    for (i1_2: int32, 0, 64) {
      for (i3_2: int32, 0, 16) {
        A_buf[((i1_2*16) + i3_2)] = ((int32*)A_buf[((i1_2*16) + i3_2)] + (int32*)B_buf[((i1_2*16) + i3_2)])
      }
    }
    for (i1_3: int32, 0, 64) {
      for (i3_3: int32, 0, 16) {
        C_2[((i1_3*16) + i3_3)] = cast(int8, (int32*)A_buf[((i1_3*16) + i3_3)])
      }
    }
  }
}
</pre></div>
</div>
<p>Although this schedule makes sense, it won’t compile to VTA.
In order to obtain correct code generation, we need to apply scheduling
primitives and code annotation that will transform the schedule into
one that can be directly lowered onto VTA hardware intrinsics.
Those include:</p>
<blockquote>
<div><ul class="simple">
<li><p>DMA copy operations which will take globally-scoped tensors and copy
those into locally-scoped tensors.</p></li>
<li><p>Vector ALU operations that will perform the vector add.</p></li>
</ul>
</div></blockquote>
</div>
<div class="section" id="buffer-scopes">
<h2>Buffer Scopes<a class="headerlink" href="#buffer-scopes" title="永久链接至标题">¶</a></h2>
<p>First, we set the scope of the copy buffers to indicate to TVM that these
intermediate tensors will be stored in the VTA’s on-chip SRAM buffers.
Below, we tell TVM that <code class="code docutils literal notranslate"><span class="pre">A_buf</span></code>, <code class="code docutils literal notranslate"><span class="pre">B_buf</span></code>, <code class="code docutils literal notranslate"><span class="pre">C_buf</span></code>
will live in VTA’s on-chip <em>accumulator buffer</em> which serves as
VTA’s general purpose register file.</p>
<p>Set the intermediate tensors’ scope to VTA’s on-chip accumulator buffer</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">s</span><span class="p">[</span><span class="n">A_buf</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">B_buf</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">C_buf</span><span class="p">]</span><span class="o">.</span><span class="n">set_scope</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_scope</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="dma-transfers">
<h2>DMA Transfers<a class="headerlink" href="#dma-transfers" title="永久链接至标题">¶</a></h2>
<p>We need to schedule DMA transfers to move data living in DRAM to
and from the VTA on-chip buffers.
We insert <code class="code docutils literal notranslate"><span class="pre">dma_copy</span></code> pragmas to indicate to the compiler
that the copy operations will be performed in bulk via DMA,
which is common in hardware accelerators.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Tag the buffer copies with the DMA pragma to map a copy loop to a</span>
<span class="c1"># DMA transfer operation</span>
<span class="n">s</span><span class="p">[</span><span class="n">A_buf</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">A_buf</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">dma_copy</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">B_buf</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">B_buf</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">dma_copy</span><span class="p">)</span>
<span class="n">s</span><span class="p">[</span><span class="n">C</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="n">C</span><span class="p">]</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">dma_copy</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="alu-operations">
<h2>ALU Operations<a class="headerlink" href="#alu-operations" title="永久链接至标题">¶</a></h2>
<p>VTA has a vector ALU that can perform vector operations on tensors
in the accumulator buffer.
In order to tell TVM that a given operation needs to be mapped to the
VTA’s vector ALU, we need to explicitly tag the vector addition loop
with an <code class="code docutils literal notranslate"><span class="pre">env.alu</span></code> pragma.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Tell TVM that the computation needs to be performed</span>
<span class="c1"># on VTA&#39;s vector ALU</span>
<span class="n">s</span><span class="p">[</span><span class="n">C_buf</span><span class="p">]</span><span class="o">.</span><span class="n">pragma</span><span class="p">(</span><span class="n">C_buf</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">axis</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">env</span><span class="o">.</span><span class="n">alu</span><span class="p">)</span>

<span class="c1"># Let&#39;s take a look at the finalized schedule</span>
<span class="nb">print</span><span class="p">(</span><span class="n">vta</span><span class="o">.</span><span class="n">lower</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">A</span><span class="p">,</span> <span class="n">B</span><span class="p">,</span> <span class="n">C</span><span class="p">],</span> <span class="n">simple_mode</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>primfn(A_1: handle, B_1: handle, C_1: handle) -&gt; ()
  attr = {&quot;from_legacy_te_schedule&quot;: True, &quot;global_symbol&quot;: &quot;main&quot;, &quot;tir.noalias&quot;: True}
  buffers = {C: Buffer(C_2: Pointer(int8), int8, [1, 64, 1, 16], []),
             A: Buffer(A_2: Pointer(int32), int32, [1, 64, 1, 16], []),
             B: Buffer(B_2: Pointer(int32), int32, [1, 64, 1, 16], [])}
  buffer_map = {A_1: A, B_1: B, C_1: C} {
  attr [IterVar(vta: int32, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 2 {
    @tir.call_extern(&quot;VTALoadBuffer2D&quot;, @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), A_2, 0, 64, 1, 64, 0, 0, 0, 0, 0, 3, dtype=int32)
    @tir.call_extern(&quot;VTALoadBuffer2D&quot;, @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), B_2, 0, 64, 1, 64, 0, 0, 0, 0, 64, 3, dtype=int32)
    attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_uop_scope&quot; = &quot;VTAPushALUOp&quot; {
      @tir.call_extern(&quot;VTAUopLoopBegin&quot;, 64, 1, 1, 0, dtype=int32)
      @tir.vta.uop_push(1, 0, 0, 64, 0, 2, 0, 0, dtype=int32)
      @tir.call_extern(&quot;VTAUopLoopEnd&quot;, dtype=int32)
    }
    @tir.vta.coproc_dep_push(2, 3, dtype=int32)
  }
  attr [IterVar(vta, (nullptr), &quot;ThreadIndex&quot;, &quot;vta&quot;)] &quot;coproc_scope&quot; = 3 {
    @tir.vta.coproc_dep_pop(2, 3, dtype=int32)
    @tir.call_extern(&quot;VTAStoreBuffer2D&quot;, @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), 0, 4, C_2, 0, 64, 1, 64, dtype=int32)
  }
  @tir.vta.coproc_sync(, dtype=int32)
}
</pre></div>
</div>
<p>This concludes the scheduling portion of this tutorial.</p>
<div class="section" id="tvm-compilation">
<h3>TVM Compilation<a class="headerlink" href="#tvm-compilation" title="永久链接至标题">¶</a></h3>
<p>After we have finished specifying the schedule, we can compile it
into a TVM function. By default TVM compiles into a type-erased
function that can be directly called from python side.</p>
<p>In the following line, we use <code class="code docutils literal notranslate"><span class="pre">tvm.build</span></code> to create a function.
The build function takes the schedule, the desired signature of the
function (including the inputs and outputs) as well as the target language
we want to compile to.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">my_vadd</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">A</span><span class="p">,</span> <span class="n">B</span><span class="p">,</span> <span class="n">C</span><span class="p">],</span> <span class="s2">&quot;ext_dev&quot;</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">target_host</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;my_vadd&quot;</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="saving-the-module">
<h2>Saving the Module<a class="headerlink" href="#saving-the-module" title="永久链接至标题">¶</a></h2>
<p>TVM lets us save our module into a file so it can be loaded back later. This
is called ahead-of-time compilation and allows us to save some compilation
time.
More importantly, this allows us to cross-compile the executable on our
development machine and send it over to the Pynq FPGA board over RPC for
execution.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Write the compiled module into an object file.</span>
<span class="n">temp</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">tempdir</span><span class="p">()</span>
<span class="n">my_vadd</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;vadd.o&quot;</span><span class="p">))</span>

<span class="c1"># Send the executable over RPC</span>
<span class="n">remote</span><span class="o">.</span><span class="n">upload</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;vadd.o&quot;</span><span class="p">))</span>
</pre></div>
</div>
</div>
<div class="section" id="loading-the-module">
<h2>Loading the Module<a class="headerlink" href="#loading-the-module" title="永久链接至标题">¶</a></h2>
<p>We can load the compiled module from the file system to run the code.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">f</span> <span class="o">=</span> <span class="n">remote</span><span class="o">.</span><span class="n">load_module</span><span class="p">(</span><span class="s2">&quot;vadd.o&quot;</span><span class="p">)</span>
</pre></div>
</div>
<div class="section" id="running-the-function">
<h3>Running the Function<a class="headerlink" href="#running-the-function" title="永久链接至标题">¶</a></h3>
<p>The compiled TVM function uses a concise C API and can be invoked from
any language.</p>
<p>TVM provides an array API in python to aid quick testing and prototyping.
The array API is based on <a class="reference external" href="https://github.com/dmlc/dlpack">DLPack</a> standard.</p>
<ul class="simple">
<li><p>We first create a remote context (for remote execution on the Pynq).</p></li>
<li><p>Then <code class="code docutils literal notranslate"><span class="pre">tvm.nd.array</span></code> formats the data accordingly.</p></li>
<li><p><code class="code docutils literal notranslate"><span class="pre">f()</span></code> runs the actual computation.</p></li>
<li><p><code class="code docutils literal notranslate"><span class="pre">numpy()</span></code> copies the result array back in a format that can be
interpreted.</p></li>
</ul>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Get the remote device context</span>
<span class="n">ctx</span> <span class="o">=</span> <span class="n">remote</span><span class="o">.</span><span class="n">ext_dev</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>

<span class="c1"># Initialize the A and B arrays randomly in the int range of [-128, 128)</span>
<span class="n">A_orig</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="o">-</span><span class="mi">128</span><span class="p">,</span> <span class="mi">128</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="n">o</span> <span class="o">*</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">m</span> <span class="o">*</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">A</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
<span class="n">B_orig</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="o">-</span><span class="mi">128</span><span class="p">,</span> <span class="mi">128</span><span class="p">,</span> <span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="n">o</span> <span class="o">*</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">m</span> <span class="o">*</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">B</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>

<span class="c1"># Apply packing to the A and B arrays from a 2D to a 4D packed layout</span>
<span class="n">A_packed</span> <span class="o">=</span> <span class="n">A_orig</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">o</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">)</span><span class="o">.</span><span class="n">transpose</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="n">B_packed</span> <span class="o">=</span> <span class="n">B_orig</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">o</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">)</span><span class="o">.</span><span class="n">transpose</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>

<span class="c1"># Format the input/output arrays with tvm.nd.array to the DLPack standard</span>
<span class="n">A_nd</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">A_packed</span><span class="p">,</span> <span class="n">ctx</span><span class="p">)</span>
<span class="n">B_nd</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">B_packed</span><span class="p">,</span> <span class="n">ctx</span><span class="p">)</span>
<span class="n">C_nd</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">((</span><span class="n">o</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">C</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="n">ctx</span><span class="p">)</span>

<span class="c1"># Invoke the module to perform the computation</span>
<span class="n">f</span><span class="p">(</span><span class="n">A_nd</span><span class="p">,</span> <span class="n">B_nd</span><span class="p">,</span> <span class="n">C_nd</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="verifying-correctness">
<h3>Verifying Correctness<a class="headerlink" href="#verifying-correctness" title="永久链接至标题">¶</a></h3>
<p>Compute the reference result with numpy and assert that the output of the
vector addition indeed is correct</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Compute reference result with numpy</span>
<span class="n">C_ref</span> <span class="o">=</span> <span class="p">(</span><span class="n">A_orig</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">)</span> <span class="o">+</span> <span class="n">B_orig</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">acc_dtype</span><span class="p">))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">C</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
<span class="n">C_ref</span> <span class="o">=</span> <span class="n">C_ref</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">o</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">)</span><span class="o">.</span><span class="n">transpose</span><span class="p">((</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="n">np</span><span class="o">.</span><span class="n">testing</span><span class="o">.</span><span class="n">assert_equal</span><span class="p">(</span><span class="n">C_ref</span><span class="p">,</span> <span class="n">C_nd</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Successful vector add test!&quot;</span><span class="p">)</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>Successful vector add test!
</pre></div>
</div>
</div>
<div class="section" id="summary">
<h3>总结<a class="headerlink" href="#summary" title="永久链接至标题">¶</a></h3>
<p>This tutorial provides a walk-through of TVM for programming the
deep learning accelerator VTA with a simple vector addition example.
The general workflow includes:</p>
<ul class="simple">
<li><p>Programming the FPGA with the VTA bitstream over RPC.</p></li>
<li><p>Describing the vector add computation via a series of computations.</p></li>
<li><p>Describing how we want to perform the computation using schedule primitives.</p></li>
<li><p>Compiling the function to the VTA target.</p></li>
<li><p>Running the compiled module and verifying it against a numpy implementation.</p></li>
</ul>
<p>You are more than welcome to check other examples out and tutorials
to learn more about the supported operations, schedule primitives
and other features supported by TVM to program VTA.</p>
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-topic-vta-tutorials-vta-get-started-py">
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../../_downloads/d2434fbd36b5bd5a93a69ca80465d5b6/vta_get_started.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">vta_get_started.py</span></code></a></p>
</div>
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../../_downloads/83b9961c758069912464db3443fffc06/vta_get_started.ipynb"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Jupyter</span> <span class="pre">notebook:</span> <span class="pre">vta_get_started.ipynb</span></code></a></p>
</div>
</div>
<p class="sphx-glr-signature"><a class="reference external" href="https://sphinx-gallery.github.io">Gallery generated by Sphinx-Gallery</a></p>
</div>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="frontend/deploy_classification.html" class="btn btn-neutral float-right" title="Deploy Pretrained Vision Model from MxNet on VTA" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="matrix_multiply.html" class="btn btn-neutral float-left" title="Simple Matrix Multiply" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../../_static/img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <!-- removed stray duplicate closing body tag; the document closes at the final closing tag below -->
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>