





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN">
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>Auto-tuning a convolutional network on VTA &mdash; tvm 0.8.dev1982 文档</title>

  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <!-- Deduplicated stylesheets: theme.css and pygments.css were each linked twice.
       One link each is kept, ordered by the last original occurrence so the
       effective CSS cascade (which duplicate "wins") is unchanged. -->
  <link rel="stylesheet" href="../../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../../_static/gallery.css">
  <link rel="stylesheet" href="../../../../_static/pygments.css">
  <link rel="stylesheet" href="../../../../_static/css/tlcpack_theme.css">

  <link rel="shortcut icon" href="../../../../_static/tvm-logo-square.png">

  <!-- Deduplicated: documentation_options.js was included twice with the same
       src/id/data-url_root; a single inclusion is sufficient. -->
  <script id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
  <script src="../../../../_static/jquery.js"></script>
  <script src="../../../../_static/underscore.js"></script>
  <script src="../../../../_static/doctools.js"></script>
  <script src="../../../../_static/translations.js"></script>

  <script src="../../../../_static/js/theme.js"></script>

  <script src="../../../../_static/js/tlcpack_theme.js"></script>
  <link rel="index" title="索引" href="../../../../genindex.html">
  <link rel="search" title="搜索" href="../../../../search.html">
  <link rel="next" title="语言参考" href="../../../../reference/langref/index.html">
  <link rel="prev" title="Auto-tuning a ALU fused op on VTA" href="tune_alu_vta.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <!-- All previously unquoted src/href attribute values are now quoted. -->
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <!-- Mobile (responsive) variant of the ASF dropdown below. -->
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <!-- Desktop ASF dropdown (Bootstrap data-toggle). -->
            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../../../index.html">
          

          
            
            <img src="../../../../_static/tvm-logo-small.png" class="logo" alt="Logo"/>
          
          </a>

          
            
            
                <div class="version">
                  0.8.dev1982
                </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
    <!-- placeholder alone is not an accessible name; aria-label gives the
         search field one without changing the visual layout. -->
    <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
    <input type="hidden" name="check_keywords" value="yes">
    <input type="hidden" name="area" value="default">
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../install/index.html">安装 TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../contribute/index.html">贡献者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../tutorial/index.html">User Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../how_to/index.html">How To Guides</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../dev/tutorial/index.html">Developer Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../dev/how_to/how_to.html">开发者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../arch/index.html">Design and Architecture</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../../microtvm/index.html">microTVM：裸机使用TVM</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../../index.html">VTA: Versatile Tensor Accelerator</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../../install.html">VTA安装指南</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../dev/index.html">VTA设计与开发指南</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="../index.html">VTA教程</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="../matrix_multiply.html">Simple Matrix Multiply</a></li>
<li class="toctree-l3"><a class="reference internal" href="../vta_get_started.html">从 VTA 开始</a></li>
<li class="toctree-l3"><a class="reference internal" href="../index.html#compile-deep-learning-models">编译深度学习模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="../index.html#optimize-tensor-operators">优化张量算子</a></li>
<li class="toctree-l3 current"><a class="reference internal" href="../index.html#auto-tuning">自动调整</a><ul class="current">
<li class="toctree-l4"><a class="reference internal" href="tune_alu_vta.html">Auto-tuning a ALU fused op on VTA</a></li>
<li class="toctree-l4"><a class="reference internal" href="tune_alu_vta.html#compile-network">编译网络</a></li>
<li class="toctree-l4"><a class="reference internal" href="tune_alu_vta.html#set-tuning-options">Set Tuning Options</a></li>
<li class="toctree-l4 current"><a class="current reference internal" href="#">Auto-tuning a convolutional network on VTA</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../index.html#literature">Literature</a></li>
</ul>
</li>
</ul>
<p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/langref/index.html">语言参考</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/api/python/index.html">Python API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/api/links.html">Other APIs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../reference/publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../genindex.html">索引</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              <!-- tvm -->
              Table of contents
            </div>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
      <li><a href="../../../../index.html">Docs</a> <span class="br-arrow">&gt;</span></li>
          <li><a href="../../index.html">VTA: Versatile Tensor Accelerator</a> <span class="br-arrow">&gt;</span></li>
          <li><a href="../index.html">VTA教程</a> <span class="br-arrow">&gt;</span></li>
      <li>Auto-tuning a convolutional network on VTA</li>
      <li class="wy-breadcrumbs-aside">
            <!-- Fixed doubled slash in the _static//img/source.svg path and
                 made the alt text describe the link's purpose. -->
            <a href="../../../../_sources/topic/vta/tutorials/autotvm/tune_relay_vta.rst.txt" rel="nofollow"> <img src="../../../../_static/img/source.svg" alt="View page source"></a>
      </li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="sphx-glr-download-link-note admonition note">
<p class="admonition-title">注解</p>
<p>点击 <a class="reference internal" href="#sphx-glr-download-topic-vta-tutorials-autotvm-tune-relay-vta-py"><span class="std std-ref">此处</span></a> 获取完整示例代码</p>
</div>
<div class="sphx-glr-example-title section" id="auto-tuning-a-convolutional-network-on-vta">
<span id="sphx-glr-topic-vta-tutorials-autotvm-tune-relay-vta-py"></span><h1>Auto-tuning a convolutional network on VTA<a class="headerlink" href="#auto-tuning-a-convolutional-network-on-vta" title="永久链接至标题">¶</a></h1>
<p><strong>作者</strong>: <a class="reference external" href="https://github.com/merrymercy">Lianmin Zheng</a>, <a class="reference external" href="https://homes.cs.washington.edu/~moreau/">Thierry Moreau</a></p>
<p>Auto-tuning for a specific accelerator design is critical for getting the best
performance for any given operator. This tutorial showcases how to tune a
whole convolutional network on VTA.</p>
<p>The operator implementation for VTA in TVM is written in template form.
The template has many tunable knobs (tile factor, virtual threads, etc).
We will tune all convolution operators in the neural network. After tuning,
we produce a log file which stores the best schedule parameters for all tuned
operators. When the TVM compiler compiles these operators, it will query this
log file to get the best knob parameters.</p>
<div class="section" id="install-dependencies">
<h2>安装依赖<a class="headerlink" href="#install-dependencies" title="永久链接至标题">¶</a></h2>
<p>To use the autotvm package in tvm, we need to install some extra dependencies.
(change “3” to “2” if you use python2):</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>pip3 install --user psutil xgboost tornado mxnet requests <span class="s2">&quot;Pillow&lt;7&quot;</span> cloudpickle
</pre></div>
</div>
<p>To make TVM run faster during tuning, it is recommended to use cython
as FFI of TVM. In the root directory of TVM, execute
(change “3” to “2” if you use python2):</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>pip3 install --user cython
sudo make cython3
</pre></div>
</div>
<p>Now return to python code. Import packages.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">from</span> <span class="nn">mxnet.gluon.model_zoo</span> <span class="k">import</span> <span class="n">vision</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">PIL</span> <span class="k">import</span> <span class="n">Image</span>

<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">topi</span>
<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">te</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">rpc</span><span class="p">,</span> <span class="n">autotvm</span><span class="p">,</span> <span class="n">relay</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="k">import</span> <span class="n">graph_executor</span><span class="p">,</span> <span class="n">utils</span><span class="p">,</span> <span class="n">download</span>
<span class="kn">from</span> <span class="nn">tvm.autotvm.measure.measure_methods</span> <span class="k">import</span> <span class="n">request_remote</span>
<span class="kn">from</span> <span class="nn">tvm.autotvm.tuner</span> <span class="k">import</span> <span class="n">XGBTuner</span><span class="p">,</span> <span class="n">GATuner</span><span class="p">,</span> <span class="n">RandomTuner</span><span class="p">,</span> <span class="n">GridSearchTuner</span>

<span class="kn">import</span> <span class="nn">vta</span>
<span class="kn">from</span> <span class="nn">vta.testing</span> <span class="k">import</span> <span class="n">simulator</span>
<span class="kn">from</span> <span class="nn">vta.top</span> <span class="k">import</span> <span class="n">graph_pack</span>
</pre></div>
</div>
</div>
<div class="section" id="compile-network">
<h2>编译网络<a class="headerlink" href="#compile-network" title="永久链接至标题">¶</a></h2>
<p>Perform vta-specific compilation with Relay from a Gluon model</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">compile_network</span><span class="p">(</span><span class="n">env</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">model</span><span class="p">,</span> <span class="n">start_pack</span><span class="p">,</span> <span class="n">stop_pack</span><span class="p">):</span>

    <span class="c1"># Populate the shape and data type dictionary</span>
    <span class="n">dtype_dict</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;data&quot;</span><span class="p">:</span> <span class="s2">&quot;float32&quot;</span><span class="p">}</span>
    <span class="n">shape_dict</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;data&quot;</span><span class="p">:</span> <span class="p">(</span><span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">224</span><span class="p">,</span> <span class="mi">224</span><span class="p">)}</span>

    <span class="c1"># Get off the shelf gluon model, and convert to relay</span>
    <span class="n">gluon_model</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">get_model</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">pretrained</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="n">mod</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">frontend</span><span class="o">.</span><span class="n">from_mxnet</span><span class="p">(</span><span class="n">gluon_model</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">)</span>

    <span class="c1"># Update shape and type dictionary</span>
    <span class="n">shape_dict</span><span class="o">.</span><span class="n">update</span><span class="p">({</span><span class="n">k</span><span class="p">:</span> <span class="n">v</span><span class="o">.</span><span class="n">shape</span> <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">params</span><span class="o">.</span><span class="n">items</span><span class="p">()})</span>
    <span class="n">dtype_dict</span><span class="o">.</span><span class="n">update</span><span class="p">({</span><span class="n">k</span><span class="p">:</span> <span class="nb">str</span><span class="p">(</span><span class="n">v</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span> <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">params</span><span class="o">.</span><span class="n">items</span><span class="p">()})</span>

    <span class="c1"># Perform quantization in Relay</span>
    <span class="c1"># Note: We set opt_level to 3 in order to fold batch norm</span>
    <span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">transform</span><span class="o">.</span><span class="n">PassContext</span><span class="p">(</span><span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">):</span>
        <span class="k">with</span> <span class="n">relay</span><span class="o">.</span><span class="n">quantize</span><span class="o">.</span><span class="n">qconfig</span><span class="p">(</span><span class="n">global_scale</span><span class="o">=</span><span class="mf">8.0</span><span class="p">,</span> <span class="n">skip_conv_layers</span><span class="o">=</span><span class="p">[</span><span class="mi">0</span><span class="p">]):</span>
            <span class="n">mod</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">quantize</span><span class="o">.</span><span class="n">quantize</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">)</span>

    <span class="c1"># Perform graph packing and constant folding for VTA target</span>
    <span class="k">if</span> <span class="n">target</span><span class="o">.</span><span class="n">device_name</span> <span class="o">==</span> <span class="s2">&quot;vta&quot;</span><span class="p">:</span>
        <span class="k">assert</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_IN</span> <span class="o">==</span> <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span>
        <span class="n">relay_prog</span> <span class="o">=</span> <span class="n">graph_pack</span><span class="p">(</span>
            <span class="n">mod</span><span class="p">[</span><span class="s2">&quot;main&quot;</span><span class="p">],</span>
            <span class="n">env</span><span class="o">.</span><span class="n">BATCH</span><span class="p">,</span>
            <span class="n">env</span><span class="o">.</span><span class="n">BLOCK_OUT</span><span class="p">,</span>
            <span class="n">env</span><span class="o">.</span><span class="n">WGT_WIDTH</span><span class="p">,</span>
            <span class="n">start_name</span><span class="o">=</span><span class="n">start_pack</span><span class="p">,</span>
            <span class="n">stop_name</span><span class="o">=</span><span class="n">stop_pack</span><span class="p">,</span>
        <span class="p">)</span>

    <span class="k">return</span> <span class="n">relay_prog</span><span class="p">,</span> <span class="n">params</span>
</pre></div>
</div>
</div>
<div class="section" id="start-rpc-tracker">
<h2>启动 RPC 跟踪器<a class="headerlink" href="#start-rpc-tracker" title="永久链接至标题">¶</a></h2>
<p>TVM uses an RPC session to communicate with Pynq boards.
During tuning, the tuner will send the generated code to the board and
measure the speed of code on the board.</p>
<p>To scale up tuning, TVM uses an RPC Tracker to manage multiple devices.
The RPC Tracker is a centralized controller node. We can register all devices to
the tracker. For example, if we have 10 Pynq boards, we can register all of them
to the tracker, and run 10 measurements in parallel, accelerating the tuning process.</p>
<p>To start an RPC tracker, run this command on the host machine. The tracker is
required during the whole tuning process, so we need to open a new terminal for
this command:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python -m tvm.exec.rpc_tracker --host<span class="o">=</span><span class="m">0</span>.0.0.0 --port<span class="o">=</span><span class="m">9190</span>
</pre></div>
</div>
<p>The expected output is:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>INFO:RPCTracker:bind to <span class="m">0</span>.0.0.0:9190
</pre></div>
</div>
</div>
<div class="section" id="register-devices-to-rpc-tracker">
<h2>Register devices to RPC Tracker<a class="headerlink" href="#register-devices-to-rpc-tracker" title="永久链接至标题">¶</a></h2>
<p>Now we can register our devices to the tracker. The first step is to
build the TVM runtime for the Pynq devices.</p>
<p>Follow <a class="reference internal" href="../../index.html#vta-index"><span class="std std-ref">VTA: Versatile Tensor Accelerator</span></a>
to build the TVM runtime on the device. Then register the device to the tracker with:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python -m tvm.exec.rpc_server --tracker<span class="o">=[</span>HOST_IP<span class="o">]</span>:9190 --key<span class="o">=</span>pynq
</pre></div>
</div>
<p>(replace <code class="code docutils literal notranslate"><span class="pre">[HOST_IP]</span></code> with the IP address of your host machine)</p>
<p>After registering devices, we can confirm it by querying the rpc_tracker:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python -m tvm.exec.query_rpc_tracker --host<span class="o">=</span><span class="m">0</span>.0.0.0 --port<span class="o">=</span><span class="m">9190</span>
</pre></div>
</div>
<p>For example, if we have 6 Pynq boards and 11 Raspberry Pi 3B,
the output can be</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>Queue Status
----------------------------------
key          total  free  pending
----------------------------------
pynq         <span class="m">6</span>      <span class="m">6</span>     <span class="m">0</span>
rpi3b        <span class="m">11</span>     <span class="m">11</span>    <span class="m">0</span>
----------------------------------
</pre></div>
</div>
<p>You can register multiple devices to the tracker to accelerate tuning.</p>
</div>
<div class="section" id="set-tuning-options">
<h2>Set Tuning Options<a class="headerlink" href="#set-tuning-options" title="永久链接至标题">¶</a></h2>
<p>Before tuning, we should apply some configurations.
Here we use a Pynq-Z1 board as an example.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Tracker host and port can be set by your environment</span>
<span class="n">tracker_host</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;TVM_TRACKER_HOST&quot;</span><span class="p">,</span> <span class="s2">&quot;127.0.0.1&quot;</span><span class="p">)</span>
<span class="n">tracker_port</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;TVM_TRACKER_PORT&quot;</span><span class="p">,</span> <span class="mi">9190</span><span class="p">))</span>

<span class="c1"># Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file</span>
<span class="n">env</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">get_env</span><span class="p">()</span>

<span class="c1"># This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.</span>
<span class="c1"># Set ``device=arm_cpu`` to run inference on the CPU</span>
<span class="c1"># or ``device=vta`` to run inference on the FPGA.</span>
<span class="n">device</span> <span class="o">=</span> <span class="s2">&quot;vta&quot;</span>
<span class="n">target</span> <span class="o">=</span> <span class="n">env</span><span class="o">.</span><span class="n">target</span> <span class="k">if</span> <span class="n">device</span> <span class="o">==</span> <span class="s2">&quot;vta&quot;</span> <span class="k">else</span> <span class="n">env</span><span class="o">.</span><span class="n">target_vta_cpu</span>

<span class="c1"># Name of Gluon model to compile</span>
<span class="c1"># The ``start_pack`` and ``stop_pack`` labels indicate where</span>
<span class="c1"># to start and end the graph packing relay pass: in other words</span>
<span class="c1"># where to start and finish offloading to VTA.</span>
<span class="n">network</span> <span class="o">=</span> <span class="s2">&quot;resnet18_v1&quot;</span>
<span class="n">start_pack</span> <span class="o">=</span> <span class="s2">&quot;nn.max_pool2d&quot;</span>
<span class="n">stop_pack</span> <span class="o">=</span> <span class="s2">&quot;nn.global_avg_pool2d&quot;</span>

<span class="c1"># Tuning option</span>
<span class="n">log_file</span> <span class="o">=</span> <span class="s2">&quot;</span><span class="si">%s</span><span class="s2">.</span><span class="si">%s</span><span class="s2">.log&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">device</span><span class="p">,</span> <span class="n">network</span><span class="p">)</span>
<span class="n">tuning_option</span> <span class="o">=</span> <span class="p">{</span>
    <span class="s2">&quot;log_filename&quot;</span><span class="p">:</span> <span class="n">log_file</span><span class="p">,</span>
    <span class="s2">&quot;tuner&quot;</span><span class="p">:</span> <span class="s2">&quot;random&quot;</span><span class="p">,</span>
    <span class="s2">&quot;n_trial&quot;</span><span class="p">:</span> <span class="mi">1000</span><span class="p">,</span>
    <span class="s2">&quot;early_stopping&quot;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
    <span class="s2">&quot;measure_option&quot;</span><span class="p">:</span> <span class="n">autotvm</span><span class="o">.</span><span class="n">measure_option</span><span class="p">(</span>
        <span class="n">builder</span><span class="o">=</span><span class="n">autotvm</span><span class="o">.</span><span class="n">LocalBuilder</span><span class="p">(),</span>
        <span class="n">runner</span><span class="o">=</span><span class="n">autotvm</span><span class="o">.</span><span class="n">RPCRunner</span><span class="p">(</span>
            <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span><span class="p">,</span>
            <span class="n">host</span><span class="o">=</span><span class="n">tracker_host</span><span class="p">,</span>
            <span class="n">port</span><span class="o">=</span><span class="n">tracker_port</span><span class="p">,</span>
            <span class="n">number</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span>
            <span class="n">timeout</span><span class="o">=</span><span class="mi">60</span><span class="p">,</span>
            <span class="n">module_loader</span><span class="o">=</span><span class="n">vta</span><span class="o">.</span><span class="n">module_loader</span><span class="p">(),</span>
            <span class="c1"># check_correctness=True, # TODO: re-enable when check_correctness works again.</span>
        <span class="p">),</span>
    <span class="p">),</span>
<span class="p">}</span>
</pre></div>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>How to set tuning options</p>
<p>In general, the default values provided here work well.
If you have enough time budget, you can set <code class="code docutils literal notranslate"><span class="pre">n_trial</span></code>, <code class="code docutils literal notranslate"><span class="pre">early_stopping</span></code>
to larger values, which makes the tuning run longer.
If your device is under-powered or your conv2d operators are large, consider
setting a longer timeout.</p>
</div>
</div>
<div class="section" id="begin-tuning">
<h2>Begin Tuning<a class="headerlink" href="#begin-tuning" title="永久链接至标题">¶</a></h2>
<p>Now we can extract tuning tasks from the network and begin tuning.
Here, we provide a simple utility function to tune a list of tasks.
This function is just an initial implementation which tunes them in sequential order.
We will introduce a more sophisticated tuning scheduler in the future.</p>
<p>Given that the tuning will be done on Pynq FPGA boards, make sure that
the <code class="docutils literal notranslate"><span class="pre">TARGET</span></code> entry in the <code class="docutils literal notranslate"><span class="pre">vta_config.json</span></code> file is set to <code class="docutils literal notranslate"><span class="pre">pynq</span></code>.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># You can skip the implementation of this function for this tutorial.</span>
<span class="k">def</span> <span class="nf">tune_tasks</span><span class="p">(</span>
    <span class="n">tasks</span><span class="p">,</span>
    <span class="n">measure_option</span><span class="p">,</span>
    <span class="n">tuner</span><span class="o">=</span><span class="s2">&quot;xgb&quot;</span><span class="p">,</span>
    <span class="n">n_trial</span><span class="o">=</span><span class="mi">1000</span><span class="p">,</span>
    <span class="n">early_stopping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
    <span class="n">log_filename</span><span class="o">=</span><span class="s2">&quot;tuning.log&quot;</span><span class="p">,</span>
    <span class="n">use_transfer_learning</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="p">):</span>

    <span class="c1"># create tmp log file</span>
    <span class="n">tmp_log_file</span> <span class="o">=</span> <span class="n">log_filename</span> <span class="o">+</span> <span class="s2">&quot;.tmp&quot;</span>
    <span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">):</span>
        <span class="n">os</span><span class="o">.</span><span class="n">remove</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">)</span>

    <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">tsk</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="nb">reversed</span><span class="p">(</span><span class="n">tasks</span><span class="p">)):</span>
        <span class="n">prefix</span> <span class="o">=</span> <span class="s2">&quot;[Task </span><span class="si">%2d</span><span class="s2">/</span><span class="si">%2d</span><span class="s2">] &quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">i</span> <span class="o">+</span> <span class="mi">1</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">tasks</span><span class="p">))</span>

        <span class="c1"># create tuner</span>
        <span class="k">if</span> <span class="n">tuner</span> <span class="o">==</span> <span class="s2">&quot;xgb&quot;</span> <span class="ow">or</span> <span class="n">tuner</span> <span class="o">==</span> <span class="s2">&quot;xgb-rank&quot;</span><span class="p">:</span>
            <span class="n">tuner_obj</span> <span class="o">=</span> <span class="n">XGBTuner</span><span class="p">(</span><span class="n">tsk</span><span class="p">,</span> <span class="n">loss_type</span><span class="o">=</span><span class="s2">&quot;rank&quot;</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">tuner</span> <span class="o">==</span> <span class="s2">&quot;xgb_knob&quot;</span><span class="p">:</span>
            <span class="n">tuner_obj</span> <span class="o">=</span> <span class="n">XGBTuner</span><span class="p">(</span><span class="n">tsk</span><span class="p">,</span> <span class="n">loss_type</span><span class="o">=</span><span class="s2">&quot;rank&quot;</span><span class="p">,</span> <span class="n">feature_type</span><span class="o">=</span><span class="s2">&quot;knob&quot;</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">tuner</span> <span class="o">==</span> <span class="s2">&quot;ga&quot;</span><span class="p">:</span>
            <span class="n">tuner_obj</span> <span class="o">=</span> <span class="n">GATuner</span><span class="p">(</span><span class="n">tsk</span><span class="p">,</span> <span class="n">pop_size</span><span class="o">=</span><span class="mi">50</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">tuner</span> <span class="o">==</span> <span class="s2">&quot;random&quot;</span><span class="p">:</span>
            <span class="n">tuner_obj</span> <span class="o">=</span> <span class="n">RandomTuner</span><span class="p">(</span><span class="n">tsk</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">tuner</span> <span class="o">==</span> <span class="s2">&quot;gridsearch&quot;</span><span class="p">:</span>
            <span class="n">tuner_obj</span> <span class="o">=</span> <span class="n">GridSearchTuner</span><span class="p">(</span><span class="n">tsk</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;Invalid tuner: &quot;</span> <span class="o">+</span> <span class="n">tuner</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">use_transfer_learning</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">isfile</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">):</span>
                <span class="n">tuner_obj</span><span class="o">.</span><span class="n">load_history</span><span class="p">(</span><span class="n">autotvm</span><span class="o">.</span><span class="n">record</span><span class="o">.</span><span class="n">load_from_file</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">))</span>

        <span class="c1"># do tuning</span>
        <span class="n">tsk_trial</span> <span class="o">=</span> <span class="nb">min</span><span class="p">(</span><span class="n">n_trial</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">tsk</span><span class="o">.</span><span class="n">config_space</span><span class="p">))</span>
        <span class="n">tuner_obj</span><span class="o">.</span><span class="n">tune</span><span class="p">(</span>
            <span class="n">n_trial</span><span class="o">=</span><span class="n">tsk_trial</span><span class="p">,</span>
            <span class="n">early_stopping</span><span class="o">=</span><span class="n">early_stopping</span><span class="p">,</span>
            <span class="n">measure_option</span><span class="o">=</span><span class="n">measure_option</span><span class="p">,</span>
            <span class="n">callbacks</span><span class="o">=</span><span class="p">[</span>
                <span class="n">autotvm</span><span class="o">.</span><span class="n">callback</span><span class="o">.</span><span class="n">progress_bar</span><span class="p">(</span><span class="n">tsk_trial</span><span class="p">,</span> <span class="n">prefix</span><span class="o">=</span><span class="n">prefix</span><span class="p">),</span>
                <span class="n">autotvm</span><span class="o">.</span><span class="n">callback</span><span class="o">.</span><span class="n">log_to_file</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">),</span>
            <span class="p">],</span>
        <span class="p">)</span>

    <span class="c1"># pick best records to a cache file</span>
    <span class="n">autotvm</span><span class="o">.</span><span class="n">record</span><span class="o">.</span><span class="n">pick_best</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">,</span> <span class="n">log_filename</span><span class="p">)</span>
    <span class="n">os</span><span class="o">.</span><span class="n">remove</span><span class="p">(</span><span class="n">tmp_log_file</span><span class="p">)</span>
</pre></div>
</div>
<p>Register VTA-specific tuning tasks</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">register_vta_tuning_tasks</span><span class="p">():</span>
    <span class="kn">from</span> <span class="nn">tvm.autotvm.task</span> <span class="k">import</span> <span class="n">TaskExtractEnv</span>

    <span class="nd">@tvm</span><span class="o">.</span><span class="n">te</span><span class="o">.</span><span class="n">tag_scope</span><span class="p">(</span><span class="n">tag</span><span class="o">=</span><span class="n">topi</span><span class="o">.</span><span class="n">tag</span><span class="o">.</span><span class="n">ELEMWISE</span><span class="p">)</span>
    <span class="k">def</span> <span class="nf">my_clip</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">a_min</span><span class="p">,</span> <span class="n">a_max</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Unlike topi&#39;s current clip, put min and max into two stages.&quot;&quot;&quot;</span>
        <span class="n">const_min</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">tir</span><span class="o">.</span><span class="n">const</span><span class="p">(</span><span class="n">a_min</span><span class="p">,</span> <span class="n">x</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
        <span class="n">const_max</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">tir</span><span class="o">.</span><span class="n">const</span><span class="p">(</span><span class="n">a_max</span><span class="p">,</span> <span class="n">x</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">tvm</span><span class="o">.</span><span class="n">te</span><span class="o">.</span><span class="n">min</span><span class="p">(</span><span class="n">x</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="n">const_max</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;clipA&quot;</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">compute</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="k">lambda</span> <span class="o">*</span><span class="n">i</span><span class="p">:</span> <span class="n">tvm</span><span class="o">.</span><span class="n">te</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="n">x</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">),</span> <span class="n">const_min</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;clipB&quot;</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="c1"># init autotvm env to register VTA operator</span>
    <span class="n">TaskExtractEnv</span><span class="p">()</span>

    <span class="nd">@autotvm</span><span class="o">.</span><span class="n">template</span><span class="p">(</span><span class="s2">&quot;conv2d_packed.vta&quot;</span><span class="p">)</span>
    <span class="k">def</span> <span class="nf">_topi_nn_conv2d</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
        <span class="k">assert</span> <span class="ow">not</span> <span class="n">kwargs</span><span class="p">,</span> <span class="s2">&quot;Do not support kwargs in template function call&quot;</span>
        <span class="n">A</span><span class="p">,</span> <span class="n">W</span> <span class="o">=</span> <span class="n">args</span><span class="p">[:</span><span class="mi">2</span><span class="p">]</span>

        <span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">target</span><span class="o">.</span><span class="n">vta</span><span class="p">():</span>
            <span class="n">res</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">top</span><span class="o">.</span><span class="n">conv2d_packed</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
            <span class="n">res</span> <span class="o">=</span> <span class="n">topi</span><span class="o">.</span><span class="n">right_shift</span><span class="p">(</span><span class="n">res</span><span class="p">,</span> <span class="mi">8</span><span class="p">)</span>
            <span class="n">res</span> <span class="o">=</span> <span class="n">my_clip</span><span class="p">(</span><span class="n">res</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">127</span><span class="p">)</span>
            <span class="n">res</span> <span class="o">=</span> <span class="n">topi</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">res</span><span class="p">,</span> <span class="s2">&quot;int8&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">tvm</span><span class="o">.</span><span class="n">target</span><span class="o">.</span><span class="n">Target</span><span class="o">.</span><span class="n">current</span><span class="p">()</span><span class="o">.</span><span class="n">device_name</span> <span class="o">==</span> <span class="s2">&quot;vta&quot;</span><span class="p">:</span>
            <span class="n">s</span> <span class="o">=</span> <span class="n">vta</span><span class="o">.</span><span class="n">top</span><span class="o">.</span><span class="n">schedule_conv2d_packed</span><span class="p">([</span><span class="n">res</span><span class="p">])</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">s</span> <span class="o">=</span> <span class="n">te</span><span class="o">.</span><span class="n">create_schedule</span><span class="p">([</span><span class="n">res</span><span class="o">.</span><span class="n">op</span><span class="p">])</span>
        <span class="k">return</span> <span class="n">s</span><span class="p">,</span> <span class="p">[</span><span class="n">A</span><span class="p">,</span> <span class="n">W</span><span class="p">,</span> <span class="n">res</span><span class="p">]</span>
</pre></div>
</div>
<p>Finally, we launch tuning jobs and evaluate the end-to-end performance.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">tune_and_evaluate</span><span class="p">(</span><span class="n">tuning_opt</span><span class="p">):</span>

    <span class="c1"># Register VTA tuning tasks</span>
    <span class="n">register_vta_tuning_tasks</span><span class="p">()</span>

    <span class="c1"># Perform task extraction on Relay program</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Extract tasks...&quot;</span><span class="p">)</span>
    <span class="n">relay_prog</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">compile_network</span><span class="p">(</span><span class="n">env</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">network</span><span class="p">,</span> <span class="n">start_pack</span><span class="p">,</span> <span class="n">stop_pack</span><span class="p">)</span>
    <span class="n">mod</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">IRModule</span><span class="o">.</span><span class="n">from_expr</span><span class="p">(</span><span class="n">relay_prog</span><span class="p">)</span>
    <span class="n">tasks</span> <span class="o">=</span> <span class="n">autotvm</span><span class="o">.</span><span class="n">task</span><span class="o">.</span><span class="n">extract_from_program</span><span class="p">(</span>
        <span class="n">mod</span><span class="p">,</span>
        <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">,</span>
        <span class="n">ops</span><span class="o">=</span><span class="p">(</span><span class="n">relay</span><span class="o">.</span><span class="n">op</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;nn.conv2d&quot;</span><span class="p">),),</span>
        <span class="n">target</span><span class="o">=</span><span class="n">target</span><span class="p">,</span>
        <span class="n">target_host</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">target_host</span><span class="p">,</span>
    <span class="p">)</span>

    <span class="c1"># filter out non-packed conv2d task</span>
    <span class="n">tasks</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">t</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">t</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="mi">1</span><span class="p">])</span> <span class="o">&gt;</span> <span class="mi">4</span> <span class="ow">and</span> <span class="s2">&quot;conv&quot;</span> <span class="ow">in</span> <span class="n">t</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">tasks</span><span class="p">))</span>

    <span class="c1"># We should have extracted 10 convolution tasks</span>
    <span class="k">assert</span> <span class="nb">len</span><span class="p">(</span><span class="n">tasks</span><span class="p">)</span> <span class="o">==</span> <span class="mi">10</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Extracted </span><span class="si">{}</span><span class="s2"> conv2d tasks:&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">tasks</span><span class="p">)))</span>
    <span class="k">for</span> <span class="n">tsk</span> <span class="ow">in</span> <span class="n">tasks</span><span class="p">:</span>
        <span class="n">inp</span> <span class="o">=</span> <span class="n">tsk</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">wgt</span> <span class="o">=</span> <span class="n">tsk</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">1</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">batch</span> <span class="o">=</span> <span class="n">inp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">*</span> <span class="n">inp</span><span class="p">[</span><span class="mi">4</span><span class="p">]</span>
        <span class="n">in_filter</span> <span class="o">=</span> <span class="n">inp</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">inp</span><span class="p">[</span><span class="mi">5</span><span class="p">]</span>
        <span class="n">out_filter</span> <span class="o">=</span> <span class="n">wgt</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">*</span> <span class="n">wgt</span><span class="p">[</span><span class="mi">4</span><span class="p">]</span>
        <span class="n">height</span><span class="p">,</span> <span class="n">width</span> <span class="o">=</span> <span class="n">inp</span><span class="p">[</span><span class="mi">2</span><span class="p">],</span> <span class="n">inp</span><span class="p">[</span><span class="mi">3</span><span class="p">]</span>
        <span class="n">hkernel</span><span class="p">,</span> <span class="n">wkernel</span> <span class="o">=</span> <span class="n">wgt</span><span class="p">[</span><span class="mi">2</span><span class="p">],</span> <span class="n">wgt</span><span class="p">[</span><span class="mi">3</span><span class="p">]</span>
        <span class="n">hstride</span><span class="p">,</span> <span class="n">wstride</span> <span class="o">=</span> <span class="n">tsk</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">2</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span> <span class="n">tsk</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">2</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">hpad</span><span class="p">,</span> <span class="n">wpad</span> <span class="o">=</span> <span class="n">tsk</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">3</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span> <span class="n">tsk</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">3</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
        <span class="nb">print</span><span class="p">(</span>
            <span class="s2">&quot;(</span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">, </span><span class="si">{}</span><span class="s2">)&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
                <span class="n">batch</span><span class="p">,</span>
                <span class="n">height</span><span class="p">,</span>
                <span class="n">width</span><span class="p">,</span>
                <span class="n">in_filter</span><span class="p">,</span>
                <span class="n">out_filter</span><span class="p">,</span>
                <span class="n">hkernel</span><span class="p">,</span>
                <span class="n">wkernel</span><span class="p">,</span>
                <span class="n">hpad</span><span class="p">,</span>
                <span class="n">wpad</span><span class="p">,</span>
                <span class="n">hstride</span><span class="p">,</span>
                <span class="n">wstride</span><span class="p">,</span>
            <span class="p">)</span>
        <span class="p">)</span>

    <span class="c1"># We do not run the tuning in our webpage server since it takes too long.</span>
    <span class="c1"># Comment the following line to run it by yourself.</span>
    <span class="k">return</span>

    <span class="c1"># run tuning tasks</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Tuning...&quot;</span><span class="p">)</span>
    <span class="n">tune_tasks</span><span class="p">(</span><span class="n">tasks</span><span class="p">,</span> <span class="o">**</span><span class="n">tuning_opt</span><span class="p">)</span>

    <span class="c1"># evaluate with tuning history</span>
    <span class="k">if</span> <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span> <span class="o">!=</span> <span class="s2">&quot;sim&quot;</span><span class="p">:</span>
        <span class="c1"># Get remote from fleet node</span>
        <span class="n">remote</span> <span class="o">=</span> <span class="n">autotvm</span><span class="o">.</span><span class="n">measure</span><span class="o">.</span><span class="n">request_remote</span><span class="p">(</span>
            <span class="n">env</span><span class="o">.</span><span class="n">TARGET</span><span class="p">,</span> <span class="n">tracker_host</span><span class="p">,</span> <span class="n">tracker_port</span><span class="p">,</span> <span class="n">timeout</span><span class="o">=</span><span class="mi">10000</span>
        <span class="p">)</span>
        <span class="c1"># Reconfigure the JIT runtime and FPGA.</span>
        <span class="n">vta</span><span class="o">.</span><span class="n">reconfig_runtime</span><span class="p">(</span><span class="n">remote</span><span class="p">)</span>
        <span class="n">vta</span><span class="o">.</span><span class="n">program_fpga</span><span class="p">(</span><span class="n">remote</span><span class="p">,</span> <span class="n">bitstream</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="c1"># In simulation mode, host the RPC server locally.</span>
        <span class="n">remote</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">LocalSession</span><span class="p">()</span>

    <span class="c1"># compile kernels with history best records</span>
    <span class="k">with</span> <span class="n">autotvm</span><span class="o">.</span><span class="n">tophub</span><span class="o">.</span><span class="n">context</span><span class="p">(</span><span class="n">target</span><span class="p">,</span> <span class="n">extra_files</span><span class="o">=</span><span class="p">[</span><span class="n">log_file</span><span class="p">]):</span>
        <span class="c1"># Compile network</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Compile...&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">target</span><span class="o">.</span><span class="n">device_name</span> <span class="o">!=</span> <span class="s2">&quot;vta&quot;</span><span class="p">:</span>
            <span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">transform</span><span class="o">.</span><span class="n">PassContext</span><span class="p">(</span><span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">disabled_pass</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;AlterOpLayout&quot;</span><span class="p">}):</span>
                <span class="n">lib</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">build</span><span class="p">(</span>
                    <span class="n">relay_prog</span><span class="p">,</span> <span class="n">target</span><span class="o">=</span><span class="n">target</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">,</span> <span class="n">target_host</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">target_host</span>
                <span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">with</span> <span class="n">vta</span><span class="o">.</span><span class="n">build_config</span><span class="p">(</span><span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">disabled_pass</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;AlterOpLayout&quot;</span><span class="p">}):</span>
                <span class="n">lib</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">build</span><span class="p">(</span>
                    <span class="n">relay_prog</span><span class="p">,</span> <span class="n">target</span><span class="o">=</span><span class="n">target</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">,</span> <span class="n">target_host</span><span class="o">=</span><span class="n">env</span><span class="o">.</span><span class="n">target_host</span>
                <span class="p">)</span>

        <span class="c1"># Export library</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Upload...&quot;</span><span class="p">)</span>
        <span class="n">temp</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">tempdir</span><span class="p">()</span>
        <span class="n">lib</span><span class="o">.</span><span class="n">export_library</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;graphlib.tar&quot;</span><span class="p">))</span>
        <span class="n">remote</span><span class="o">.</span><span class="n">upload</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;graphlib.tar&quot;</span><span class="p">))</span>
        <span class="n">lib</span> <span class="o">=</span> <span class="n">remote</span><span class="o">.</span><span class="n">load_module</span><span class="p">(</span><span class="s2">&quot;graphlib.tar&quot;</span><span class="p">)</span>

        <span class="c1"># Generate the graph executor</span>
        <span class="n">ctx</span> <span class="o">=</span> <span class="n">remote</span><span class="o">.</span><span class="n">ext_dev</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span> <span class="k">if</span> <span class="n">device</span> <span class="o">==</span> <span class="s2">&quot;vta&quot;</span> <span class="k">else</span> <span class="n">remote</span><span class="o">.</span><span class="n">cpu</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
        <span class="n">m</span> <span class="o">=</span> <span class="n">graph_executor</span><span class="o">.</span><span class="n">GraphModule</span><span class="p">(</span><span class="n">lib</span><span class="p">[</span><span class="s2">&quot;default&quot;</span><span class="p">](</span><span class="n">ctx</span><span class="p">))</span>

        <span class="c1"># upload parameters to device</span>
        <span class="n">image</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">((</span><span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">uniform</span><span class="p">(</span><span class="n">size</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">224</span><span class="p">,</span> <span class="mi">224</span><span class="p">)))</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">))</span>
        <span class="n">m</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="s2">&quot;data&quot;</span><span class="p">,</span> <span class="n">image</span><span class="p">)</span>

        <span class="c1"># evaluate</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Evaluate inference time cost...&quot;</span><span class="p">)</span>
        <span class="n">timer</span> <span class="o">=</span> <span class="n">m</span><span class="o">.</span><span class="n">module</span><span class="o">.</span><span class="n">time_evaluator</span><span class="p">(</span><span class="s2">&quot;run&quot;</span><span class="p">,</span> <span class="n">ctx</span><span class="p">,</span> <span class="n">number</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">repeat</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
        <span class="n">tcost</span> <span class="o">=</span> <span class="n">timer</span><span class="p">()</span>
        <span class="n">prof_res</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">tcost</span><span class="o">.</span><span class="n">results</span><span class="p">)</span> <span class="o">*</span> <span class="mi">1000</span>  <span class="c1"># convert to millisecond</span>
        <span class="nb">print</span><span class="p">(</span>
            <span class="s2">&quot;Mean inference time (std dev): </span><span class="si">%.2f</span><span class="s2"> ms (</span><span class="si">%.2f</span><span class="s2"> ms)&quot;</span>
            <span class="o">%</span> <span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">prof_res</span><span class="p">),</span> <span class="n">np</span><span class="o">.</span><span class="n">std</span><span class="p">(</span><span class="n">prof_res</span><span class="p">))</span>
        <span class="p">)</span>


<span class="c1"># Run the tuning and evaluate the results</span>
<span class="n">tune_and_evaluate</span><span class="p">(</span><span class="n">tuning_option</span><span class="p">)</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>Extract tasks...
Extracted 10 conv2d tasks:
(1, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)
(1, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)
(1, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)
(1, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)
(1, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)
(1, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)
(1, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)
(1, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)
(1, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)
(1, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)
</pre></div>
</div>
</div>
<div class="section" id="sample-output">
<h2>样本输出<a class="headerlink" href="#sample-output" title="永久链接至标题">¶</a></h2>
<p>The tuning needs to compile many programs and extract features from them.
So a high performance CPU is recommended.
One sample output is listed below.
It takes about 2 hours on a 16T CPU, and 6 Pynq boards.</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>Extract tasks...
<span class="o">[</span>Warning<span class="o">]</span> Invalid shape during AutoTVM task creation
Extracted <span class="m">10</span> conv2d tasks:
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">16</span>, <span class="m">14</span>, <span class="m">14</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">32</span>, <span class="m">16</span>, <span class="m">1</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span class="m">0</span>, <span class="m">0</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">16</span>, <span class="m">14</span>, <span class="m">14</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">32</span>, <span class="m">16</span>, <span class="m">1</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span 
class="o">(</span><span class="m">0</span>, <span class="m">0</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">8</span>, <span class="m">28</span>, <span class="m">28</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">16</span>, <span class="m">8</span>, <span class="m">1</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span class="m">0</span>, <span class="m">0</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">8</span>, <span class="m">28</span>, <span class="m">28</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">16</span>, <span class="m">8</span>, <span class="m">1</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">0</span>, <span class="m">0</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">4</span>, <span class="m">56</span>, <span class="m">56</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">8</span>, <span class="m">4</span>, <span class="m">1</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span class="m">0</span>, <span class="m">0</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">4</span>, <span class="m">56</span>, <span class="m">56</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">8</span>, <span class="m">4</span>, <span class="m">1</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">0</span>, <span class="m">0</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">4</span>, <span class="m">56</span>, <span class="m">56</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">4</span>, <span class="m">4</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">4</span>, <span class="m">56</span>, <span class="m">56</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">4</span>, <span class="m">4</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">8</span>, <span class="m">28</span>, <span class="m">28</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">8</span>, <span class="m">8</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">8</span>, <span class="m">28</span>, <span class="m">28</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">8</span>, <span class="m">8</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">4</span>, <span class="m">56</span>, <span class="m">56</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">8</span>, <span class="m">4</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">4</span>, <span class="m">56</span>, <span class="m">56</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">8</span>, <span class="m">4</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">16</span>, <span class="m">14</span>, <span class="m">14</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">16</span>, <span class="m">16</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">16</span>, <span class="m">14</span>, <span class="m">14</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">16</span>, <span class="m">16</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span 
class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">8</span>, <span class="m">28</span>, <span class="m">28</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">16</span>, <span class="m">8</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">8</span>, <span class="m">28</span>, <span class="m">28</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">16</span>, <span class="m">8</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">32</span>, <span class="m">7</span>, <span class="m">7</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">32</span>, <span class="m">32</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">32</span>, <span class="m">7</span>, <span class="m">7</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">32</span>, <span class="m">32</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span 
class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
    Task<span class="o">(</span><span class="nv">func_name</span><span class="o">=</span>topi_nn_conv2d, <span class="nv">args</span><span class="o">=((</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">16</span>, <span class="m">14</span>, <span class="m">14</span>, <span class="m">1</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="s1">&#39;TENSOR&#39;</span>, <span class="o">(</span><span class="m">32</span>, <span class="m">16</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span><span class="o">)</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">)</span>, <span class="nv">kwargs</span><span class="o">={}</span>, <span class="nv">workload</span><span class="o">=(</span><span class="s1">&#39;conv2d&#39;</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">16</span>, <span class="m">14</span>, <span class="m">14</span>, <span class="m">1</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">32</span>, <span class="m">16</span>, <span class="m">3</span>, <span class="m">3</span>, <span class="m">16</span>, <span class="m">16</span>, <span class="s1">&#39;int8&#39;</span><span class="o">)</span>, <span class="o">(</span><span class="m">2</span>, <span class="m">2</span><span class="o">)</span>, <span 
class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="o">(</span><span class="m">1</span>, <span class="m">1</span><span class="o">)</span>, <span class="s1">&#39;NCHW1n16c&#39;</span>, <span class="s1">&#39;int32&#39;</span><span class="o">))</span>
Tuning...
<span class="o">[</span>Task  <span class="m">1</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.72/  <span class="m">23</span>.24 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">480</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">640</span>.31 s Done.
<span class="o">[</span>Task  <span class="m">2</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/  <span class="m">27</span>.69 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">576</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">810</span>.09 s Done.
<span class="o">[</span>Task  <span class="m">3</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/  <span class="m">22</span>.97 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">1125</span>.37 s Done.
<span class="o">[</span>Task  <span class="m">4</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/  <span class="m">31</span>.26 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">1025</span>.52 s Done.
<span class="o">[</span>Task  <span class="m">5</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/  <span class="m">15</span>.15 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">1236</span>.58 s Done.
<span class="o">[</span>Task  <span class="m">6</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/  <span class="m">22</span>.74 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">906</span>.60 s Done.
<span class="o">[</span>Task  <span class="m">7</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/  <span class="m">15</span>.27 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">1056</span>.25 s Done.
<span class="o">[</span>Task  <span class="m">8</span>/10<span class="o">]</span>  Current/Best:    <span class="m">0</span>.00/   <span class="m">2</span>.18 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">2275</span>.29 s Done.
<span class="o">[</span>Task  <span class="m">9</span>/10<span class="o">]</span>  Current/Best:    <span class="m">2</span>.23/   <span class="m">3</span>.99 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">1000</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">2527</span>.25 s Done.
<span class="o">[</span>Task <span class="m">10</span>/10<span class="o">]</span>  Current/Best:    <span class="m">1</span>.56/   <span class="m">6</span>.32 GFLOPS <span class="p">|</span> Progress: <span class="o">(</span><span class="m">480</span>/1000<span class="o">)</span> <span class="p">|</span> <span class="m">1304</span>.84 s Done.
Compile...
Upload...
Evaluate inference <span class="nb">time</span> cost...
Mean inference <span class="nb">time</span> <span class="o">(</span>std dev<span class="o">)</span>: <span class="m">621</span>.79 ms <span class="o">(</span><span class="m">0</span>.14 ms<span class="o">)</span>
</pre></div>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p><strong>Experiencing Difficulties?</strong></p>
<p>The auto tuning module is error-prone. If you always see “ 0.00/ 0.00 GFLOPS”,
then there must be something wrong.</p>
<p>First, make sure you set the correct configuration of your device.
Then, you can print debug information by adding these lines in the beginning
of the script. It will print every measurement result, where you can find useful
error messages.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">logging</span>
<span class="n">logging</span><span class="o">.</span><span class="n">getLogger</span><span class="p">(</span><span class="s1">&#39;autotvm&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">setLevel</span><span class="p">(</span><span class="n">logging</span><span class="o">.</span><span class="n">DEBUG</span><span class="p">)</span>
</pre></div>
</div>
<p>Finally, always feel free to ask our community for help on <a class="reference external" href="https://discuss.tvm.apache.org">https://discuss.tvm.apache.org</a></p>
</div>
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-topic-vta-tutorials-autotvm-tune-relay-vta-py">
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../../../_downloads/d7b7e50e9f5b4ff04d55a56e52314c71/tune_relay_vta.py"><code class="xref download docutils literal notranslate"><span class="pre">Python</span> <span class="pre">源码下载:</span> <span class="pre">tune_relay_vta.py</span></code></a></p>
</div>
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../../../_downloads/b1b0cbd807166348a0eabbad6bfbbdaf/tune_relay_vta.ipynb"><code class="xref download docutils literal notranslate"><span class="pre">Jupyter</span> <span class="pre">notebook</span> <span class="pre">下载:</span> <span class="pre">tune_relay_vta.ipynb</span></code></a></p>
</div>
</div>
<p class="sphx-glr-signature"><a class="reference external" href="https://sphinx-gallery.github.io">Gallery generated by Sphinx-Gallery</a></p>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../../../../reference/langref/index.html" class="btn btn-neutral float-right" title="语言参考" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="tune_alu_vta.html" class="btn btn-neutral float-left" title="Auto-tuning a ALU fused op on VTA" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../../../_static/img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    // Standard Google Analytics (analytics.js) async loader snippet.
    // It defines window.ga as a command queue (calls made before the library
    // loads are buffered in ga.q), records the load timestamp in ga.l, then
    // injects an async <script> for analytics.js before the first existing
    // script element on the page.
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    // Create the tracker for this GA property and send the initial pageview.
    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>