





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN" >
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Vitis AI Integration &mdash; tvm 0.8.dev1982 文档</title>
  

  
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/gallery.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/css/tlcpack_theme.css" type="text/css" />

  
  
    <link rel="shortcut icon" href="../../_static/tvm-logo-square.png"/>
  

  
  
  
  
    
      <script id="documentation_options" data-url_root="../../" src="../../_static/documentation_options.js"></script>
        <script src="../../_static/jquery.js"></script>
        <script src="../../_static/underscore.js"></script>
        <script src="../../_static/doctools.js"></script>
        <script src="../../_static/translations.js"></script>
    
    <script type="text/javascript" src="../../_static/js/theme.js"></script>

    
    <script type="text/javascript" src="../../_static/js/tlcpack_theme.js"></script>
    <link rel="index" title="索引" href="../../genindex.html" />
    <link rel="search" title="搜索" href="../../search.html" />
    <link rel="next" title="Relay BNNS Integration" href="bnns.html" />
    <link rel="prev" title="Relay TensorRT Integration" href="tensorrt.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../index.html">
          

          
            
            <img src="../../_static/tvm-logo-small.png" class="logo" alt="Logo"/>
          
          </a>

          
            
            
                <div class="version">
                  0.8.dev1982
                </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../install/index.html">安装 TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../contribute/index.html">贡献者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../tutorial/index.html">User Tutorial</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">How To Guides</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../compile_models/index.html">编译深度学习模型</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="index.html">TVM 部署模型和集成</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="index.html#build-the-tvm-runtime-library">构建 TVM runtime 库</a></li>
<li class="toctree-l3"><a class="reference internal" href="index.html#cross-compile-the-tvm-runtime-for-other-architectures">为其它架构交叉编译TVM runtime</a></li>
<li class="toctree-l3"><a class="reference internal" href="index.html#optimize-and-tune-models-for-target-devices">针对目标设备优化和调整模型</a></li>
<li class="toctree-l3 current"><a class="reference internal" href="index.html#deploy-optimized-model-on-target-devices">在目标设备上部署优化的模型</a><ul class="current">
<li class="toctree-l4"><a class="reference internal" href="cpp_deploy.html">Deploy TVM Module using C++ API</a></li>
<li class="toctree-l4"><a class="reference internal" href="android.html">部署到安卓</a></li>
<li class="toctree-l4"><a class="reference internal" href="integrate.html">Integrate TVM into Your Project</a></li>
<li class="toctree-l4"><a class="reference internal" href="hls.html">HLS Backend Example</a></li>
<li class="toctree-l4"><a class="reference internal" href="arm_compute_lib.html">Relay Arm<sup>®</sup> Compute Library Integration</a></li>
<li class="toctree-l4"><a class="reference internal" href="tensorrt.html">Relay TensorRT Integration</a></li>
<li class="toctree-l4 current"><a class="current reference internal" href="#">Vitis AI Integration</a></li>
<li class="toctree-l4"><a class="reference internal" href="bnns.html">Relay BNNS Integration</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="index.html#additional-deployment-how-tos">其他部署方式</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../work_with_relay/index.html">Work With Relay</a></li>
<li class="toctree-l2"><a class="reference internal" href="../work_with_schedules/index.html">Work With Tensor Expression and Schedules</a></li>
<li class="toctree-l2"><a class="reference internal" href="../optimize_operators/index.html">优化张量算子</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tune_with_autotvm/index.html">Auto-Tune with Templates and AutoTVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tune_with_autoscheduler/index.html">Use AutoScheduler for Template-Free Scheduling</a></li>
<li class="toctree-l2"><a class="reference internal" href="../work_with_microtvm/index.html">Work With microTVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../extend_tvm/index.html">Extend TVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../profile/index.html">Profile Models</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../errors.html">Handle TVM Errors</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq.html">常见提问</a></li>
</ul>
</li>
</ul>
<p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../dev/tutorial/index.html">Developer Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../dev/how_to/how_to.html">开发者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../arch/index.html">Design and Architecture</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../topic/microtvm/index.html">microTVM：裸机使用TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../topic/vta/index.html">VTA: Versatile Tensor Accelerator</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../reference/langref/index.html">语言参考</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/api/python/index.html">Python API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/api/links.html">Other APIs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../genindex.html">索引</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              <!-- tvm -->
              Table of contents
            </div>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../index.html">Docs</a> <span class="br-arrow">></span></li>
        
          <li><a href="../index.html">How To Guides</a> <span class="br-arrow">></span></li>
        
          <li><a href="index.html">TVM 部署模型和集成</a> <span class="br-arrow">></span></li>
        
      <li>Vitis AI Integration</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
            <a href="../../_sources/how_to/deploy/vitis_ai.rst.txt" rel="nofollow"> <img src="../../_static/img/source.svg" alt="View page source"/></a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="vitis-ai-integration">
<h1>Vitis AI Integration<a class="headerlink" href="#vitis-ai-integration" title="永久链接至标题">¶</a></h1>
<p><a class="reference external" href="https://github.com/Xilinx/Vitis-AI">Vitis AI</a> is Xilinx’s
development stack for hardware-accelerated AI inference on Xilinx
platforms, including both edge devices and Alveo cards. It consists of
optimized IP, tools, libraries, models, and example designs. It is
designed with high efficiency and ease of use in mind, unleashing the
full potential of AI acceleration on Xilinx FPGA and ACAP.</p>
<p>The current Vitis AI flow inside TVM enables acceleration of Neural
Network model inference on edge and cloud with the <a class="reference external" href="https://www.xilinx.com/products/silicon-devices/soc/zynq-ultrascale-mpsoc.html">Zynq Ultrascale+
MPSoc</a>,
<a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/alveo.html">Alveo</a>
and <a class="reference external" href="https://www.xilinx.com/products/silicon-devices/acap/versal.html">Versal</a> platforms.
The identifiers for the supported edge and cloud Deep Learning Processor Units (DPU’s) are:</p>
<table class="docutils align-default">
<colgroup>
<col style="width: 64%" />
<col style="width: 16%" />
<col style="width: 20%" />
</colgroup>
<thead>
<tr class="row-odd"><th class="head"><p><strong>Target Board</strong></p></th>
<th class="head"><p><strong>DPU ID</strong></p></th>
<th class="head"><p><strong>TVM Target ID</strong></p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/zcu104.html">ZCU104</a></p></td>
<td><p>DPUCZDX8G</p></td>
<td><p>DPUCZDX8G-zcu104</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/ek-u1-zcu102-g.html">ZCU102</a></p></td>
<td><p>DPUCZDX8G</p></td>
<td><p>DPUCZDX8G-zcu102</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference external" href="https://www.xilinx.com/products/som/kria/kv260-vision-starter-kit.html">Kria KV260</a></p></td>
<td><p>DPUCZDX8G</p></td>
<td><p>DPUCZDX8G-kv260</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/vck190.html">VCK190</a></p></td>
<td><p>DPUCVDX8G</p></td>
<td><p>DPUCVDX8G</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/vck5000.html">VCK5000</a></p></td>
<td><p>DPUCVDX8H</p></td>
<td><p>DPUCVDX8H</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/alveo/u200.html">U200</a></p></td>
<td><p>DPUCADF8H</p></td>
<td><p>DPUCADF8H</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/alveo/u250.html">U250</a></p></td>
<td><p>DPUCADF8H</p></td>
<td><p>DPUCADF8H</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/alveo/u50.html">U50</a></p></td>
<td><p>DPUCAHX8H / DPUCAHX8L</p></td>
<td><p>DPUCAHX8H-u50 / DPUCAHX8L</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference external" href="https://www.xilinx.com/products/boards-and-kits/alveo/u280.html">U280</a></p></td>
<td><p>DPUCAHX8H / DPUCAHX8L</p></td>
<td><p>DPUCAHX8H-u280 / DPUCAHX8L</p></td>
</tr>
</tbody>
</table>
<p>For more information about the DPU identifiers, see the following table:</p>
<table class="docutils align-default">
<colgroup>
<col style="width: 14%" />
<col style="width: 10%" />
<col style="width: 24%" />
<col style="width: 18%" />
<col style="width: 18%" />
<col style="width: 18%" />
</colgroup>
<thead>
<tr class="row-odd"><th class="head"><p>DPU</p></th>
<th class="head"><p>Application</p></th>
<th class="head"><p>HW Platform</p></th>
<th class="head"><p>Quantization Method</p></th>
<th class="head"><p>Quantization Bitwidth</p></th>
<th class="head"><p>Design Target</p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><div class="line-block">
<div class="line">Deep Learning</div>
<div class="line">Processing Unit</div>
</div>
</td>
<td><div class="line-block">
<div class="line">C: CNN</div>
<div class="line">R: RNN</div>
</div>
</td>
<td><div class="line-block">
<div class="line">AD: Alveo DDR</div>
<div class="line">AH: Alveo HBM</div>
<div class="line">VD: Versal DDR with AIE &amp; PL</div>
<div class="line">ZD: Zynq DDR</div>
</div>
</td>
<td><div class="line-block">
<div class="line">X: DECENT</div>
<div class="line">I: Integer threshold</div>
<div class="line">F: Float threshold</div>
<div class="line">R: RNN</div>
</div>
</td>
<td><div class="line-block">
<div class="line">4: 4-bit</div>
<div class="line">8: 8-bit</div>
<div class="line">16: 16-bit</div>
<div class="line">M: Mixed Precision</div>
</div>
</td>
<td><div class="line-block">
<div class="line">G: General purpose</div>
<div class="line">H: High throughput</div>
<div class="line">L: Low latency</div>
<div class="line">C: Cost optimized</div>
</div>
</td>
</tr>
</tbody>
</table>
<p>On this page you will find information on how to <a class="reference external" href="#setup-instructions">setup</a> TVM with Vitis AI
on different platforms (Zynq, Alveo, Versal) and on how to get started with <a class="reference external" href="#compiling-a-model">Compiling a Model</a>
and executing on different platforms: <a class="reference external" href="#inference">Inference</a>.</p>
<div class="section" id="system-requirements">
<h2>System Requirements<a class="headerlink" href="#system-requirements" title="永久链接至标题">¶</a></h2>
<p>The <a class="reference external" href="https://github.com/Xilinx/Vitis-AI/blob/master/docs/learn/system_requirements.md">Vitis AI System Requirements page</a>
lists the system requirements for running docker containers as well as for executing on Alveo cards.
For edge devices (e.g. Zynq), deploying models requires a host machine for compiling models using the TVM with Vitis AI flow,
and an edge device for running the compiled models. The host system requirements are the same as specified in the link above.</p>
</div>
<div class="section" id="setup-instructions">
<h2>Setup instructions<a class="headerlink" href="#setup-instructions" title="永久链接至标题">¶</a></h2>
<p>This section provides the instructions for setting up the TVM with Vitis AI flow for both cloud and edge.
TVM with Vitis AI support is provided through a docker container. The provided scripts and Dockerfile
compile TVM and Vitis AI into a single image.</p>
<ol class="arabic">
<li><p>Clone TVM repo</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>git clone --recursive https://github.com/apache/tvm.git
<span class="nb">cd</span> tvm
</pre></div>
</div>
</li>
<li><p>Build and start the TVM - Vitis AI docker container.</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>./docker/build.sh demo_vitis_ai bash
./docker/bash.sh tvm.demo_vitis_ai

<span class="c1"># Setup inside container</span>
conda activate vitis-ai-tensorflow
</pre></div>
</div>
</li>
<li><p>Build TVM inside the container with Vitis AI (inside tvm directory)</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>mkdir build
cp cmake/config.cmake build
<span class="nb">cd</span> build
<span class="nb">echo</span> set<span class="se">\(</span>USE_LLVM ON<span class="se">\)</span> &gt;&gt; config.cmake
<span class="nb">echo</span> set<span class="se">\(</span>USE_VITIS_AI ON<span class="se">\)</span> &gt;&gt; config.cmake
cmake ..
make -j<span class="k">$(</span>nproc<span class="k">)</span>
</pre></div>
</div>
</li>
<li><p>Install TVM</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="nb">cd</span> ../python
pip3 install -e . --user
</pre></div>
</div>
</li>
</ol>
<p>Inside this docker container you can now compile models for both cloud and edge targets.
To run on cloud Alveo or Versal VCK5000 cards inside the docker container, please follow the
<a class="reference external" href="#alveo-setup">Alveo</a> or <a class="reference external" href="#versal-vck5000-setup">Versal VCK5000</a> setup instructions, respectively.
To set up your Zynq or Versal VCK190 evaluation board for inference, please follow
the <a class="reference external" href="#zynq-setup">Zynq</a> or <a class="reference external" href="#versal-vck190-setup">Versal VCK190</a> instructions, respectively.</p>
<div class="section" id="alveo-setup">
<h3>Alveo Setup<a class="headerlink" href="#alveo-setup" title="永久链接至标题">¶</a></h3>
<p>Check out the following page for setup information: <a class="reference external" href="https://github.com/Xilinx/Vitis-AI/blob/v1.4/setup/alveo/README.md">Alveo Setup</a>.</p>
<p>After setup, you can select the right DPU inside the docker container in the following way:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="nb">cd</span> /workspace
git clone --branch v1.4 --single-branch --recursive https://github.com/Xilinx/Vitis-AI.git
<span class="nb">cd</span> Vitis-AI/setup/alveo
<span class="nb">source</span> setup.sh <span class="o">[</span>DPU-IDENTIFIER<span class="o">]</span>
</pre></div>
</div>
<p>The DPU identifier for this can be found in the second column of the DPU Targets table at the top of this page.</p>
</div>
<div class="section" id="versal-vck5000-setup">
<h3>Versal VCK5000 Setup<a class="headerlink" href="#versal-vck5000-setup" title="永久链接至标题">¶</a></h3>
<p>Check out the following page for setup information: <a class="reference external" href="https://github.com/Xilinx/Vitis-AI/blob/v1.4/setup/vck5000/README.md">VCK5000 Setup</a>.</p>
<p>After setup, you can select the right DPU inside the docker container in the following way:</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="nb">cd</span> /workspace
git clone --branch v1.4 --single-branch --recursive https://github.com/Xilinx/Vitis-AI.git
<span class="nb">cd</span> Vitis-AI/setup/vck5000
<span class="nb">source</span> setup.sh
</pre></div>
</div>
</div>
<div class="section" id="zynq-setup">
<h3>Zynq Setup<a class="headerlink" href="#zynq-setup" title="永久链接至标题">¶</a></h3>
<p>For the Zynq target (DPUCZDX8G) the compilation stage will run inside the docker on a host machine.
This doesn’t require any specific setup except for building the TVM - Vitis AI docker. For executing the model,
the Zynq board will first have to be set up and more information on that can be found here.</p>
<ol class="arabic simple">
<li><dl class="simple">
<dt>Download the Petalinux image for your target:</dt><dd><ul class="simple">
<li><p><a class="reference external" href="https://www.xilinx.com/member/forms/download/design-license-xef.html?filename=xilinx-zcu104-dpu-v2021.1-v1.4.0.img.gz">ZCU104</a></p></li>
<li><p><a class="reference external" href="https://www.xilinx.com/member/forms/download/design-license-xef.html?filename=xilinx-zcu102-dpu-v2021.1-v1.4.0.img.gz">ZCU102</a></p></li>
<li><p><a class="reference external" href="https://www.xilinx.com/member/forms/download/design-license-xef.html?filename=xilinx-kv260-dpu-v2020.2-v1.4.0.img.gz">Kria KV260</a></p></li>
</ul>
</dd>
</dl>
</li>
<li><p>Use Etcher software to burn the image file onto the SD card.</p></li>
<li><p>Insert the SD card with the image into the destination board.</p></li>
<li><p>Plug in the power and boot the board using the serial port to operate on the system.</p></li>
<li><p>Set up the IP information of the board using the serial port. For more details on step 1 to 5, please refer to <a class="reference external" href="https://www.xilinx.com/html_docs/vitis_ai/1_4/installation.html#ariaid-title8">Setting Up The Evaluation Board</a>.</p></li>
<li><p>Create 4GB of swap space on the board</p></li>
</ol>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>fallocate -l 4G /swapfile
chmod <span class="m">600</span> /swapfile
mkswap /swapfile
swapon /swapfile
<span class="nb">echo</span> <span class="s2">&quot;/swapfile swap swap defaults 0 0&quot;</span> &gt; /etc/fstab
</pre></div>
</div>
<ol class="arabic simple" start="7">
<li><p>Install hdf5 dependency (will take between 30 min and 1 hour to finish)</p></li>
</ol>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="nb">cd</span> /tmp <span class="o">&amp;&amp;</span> <span class="se">\</span>
  wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.7/src/hdf5-1.10.7.tar.gz <span class="o">&amp;&amp;</span> <span class="se">\</span>
  tar -zxvf hdf5-1.10.7.tar.gz <span class="o">&amp;&amp;</span> <span class="se">\</span>
  <span class="nb">cd</span> hdf5-1.10.7 <span class="o">&amp;&amp;</span> <span class="se">\</span>
  ./configure --prefix<span class="o">=</span>/usr <span class="o">&amp;&amp;</span> <span class="se">\</span>
  make -j<span class="k">$(</span>nproc<span class="k">)</span> <span class="o">&amp;&amp;</span> <span class="se">\</span>
  make install <span class="o">&amp;&amp;</span> <span class="se">\</span>
  <span class="nb">cd</span> /tmp <span class="o">&amp;&amp;</span> rm -rf hdf5-1.10.7*
</pre></div>
</div>
<ol class="arabic simple" start="8">
<li><p>Install Python dependencies</p></li>
</ol>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>pip3 install <span class="nv">Cython</span><span class="o">==</span><span class="m">0</span>.29.23 <span class="nv">h5py</span><span class="o">==</span><span class="m">2</span>.10.0 pillow
</pre></div>
</div>
<ol class="arabic simple" start="9">
<li><p>Install PyXIR</p></li>
</ol>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>git clone --recursive --branch rel-v0.3.1 --single-branch https://github.com/Xilinx/pyxir.git
<span class="nb">cd</span> pyxir
sudo python3 setup.py install --use_vart_edge_dpu
</pre></div>
</div>
<ol class="arabic simple" start="10">
<li><p>Build and install TVM with Vitis AI</p></li>
</ol>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>git clone --recursive https://github.com/apache/tvm
<span class="nb">cd</span> tvm
mkdir build
cp cmake/config.cmake build
<span class="nb">cd</span> build
<span class="nb">echo</span> set<span class="se">\(</span>USE_LLVM OFF<span class="se">\)</span> &gt;&gt; config.cmake
<span class="nb">echo</span> set<span class="se">\(</span>USE_VITIS_AI ON<span class="se">\)</span> &gt;&gt; config.cmake
cmake ..
make tvm_runtime -j<span class="k">$(</span>nproc<span class="k">)</span>
<span class="nb">cd</span> ../python
pip3 install --no-deps  -e .
</pre></div>
</div>
<ol class="arabic simple" start="11">
<li><p>Check whether the setup was successful in the Python shell:</p></li>
</ol>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python3 -c <span class="s1">&#39;import pyxir; import tvm&#39;</span>
</pre></div>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>You might see a warning about the ‘cpu-tf’ runtime not being found. This warning is
expected on the board and can be ignored.</p>
</div>
</div>
<div class="section" id="versal-vck190-setup">
<h3>Versal VCK190 Setup<a class="headerlink" href="#versal-vck190-setup" title="永久链接至标题">¶</a></h3>
<p>For the Versal VCK190 setup, please follow the instructions for <a class="reference external" href="#zynq-setup">Zynq Setup</a>,
but now use the <a class="reference external" href="https://www.xilinx.com/member/forms/download/design-license-xef.html?filename=xilinx-vck190-dpu-v2020.2-v1.4.0.img.gz">VCK190 image</a>
in step 1. The other steps are the same.</p>
</div>
</div>
<div class="section" id="compiling-a-model">
<h2>Compiling a Model<a class="headerlink" href="#compiling-a-model" title="永久链接至标题">¶</a></h2>
<p>The TVM with Vitis AI flow contains two stages: Compilation and Inference.
During the compilation a user can choose a model to compile for the cloud or
edge target devices that are currently supported. Once a model is compiled,
the generated files can be used to run the model on the specified target
device during the <a class="reference external" href="#inference">Inference</a> stage. Currently, the TVM with
Vitis AI flow supports a selected number of Xilinx data center and edge devices.</p>
<p>In this section we walk through the typical flow for compiling models with Vitis AI
inside TVM.</p>
<p><strong>Imports</strong></p>
<p>Make sure to import PyXIR and the DPU target (<code class="docutils literal notranslate"><span class="pre">import</span> <span class="pre">pyxir.contrib.target.DPUCADF8H</span></code> for DPUCADF8H):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">pyxir</span>
<span class="kn">import</span> <span class="nn">pyxir.contrib.target.DPUCADF8H</span>

<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">import</span> <span class="nn">tvm.relay</span> <span class="kn">as</span> <span class="nn">relay</span>
<span class="kn">from</span> <span class="nn">tvm.contrib.target</span> <span class="kn">import</span> <span class="n">vitis_ai</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="kn">import</span> <span class="n">utils</span><span class="p">,</span> <span class="n">graph_executor</span>
<span class="kn">from</span> <span class="nn">tvm.relay.op.contrib.vitis_ai</span> <span class="kn">import</span> <span class="n">partition_for_vitis_ai</span>
</pre></div>
</div>
<p><strong>Declare the Target</strong></p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">tvm_target</span> <span class="o">=</span> <span class="s1">&#39;llvm&#39;</span>
<span class="n">dpu_target</span> <span class="o">=</span> <span class="s1">&#39;DPUCADF8H&#39;</span> <span class="c1"># options: &#39;DPUCADF8H&#39;, &#39;DPUCAHX8H-u50&#39;, &#39;DPUCAHX8H-u280&#39;, &#39;DPUCAHX8L&#39;, &#39;DPUCVDX8H&#39;, &#39;DPUCZDX8G-zcu104&#39;, &#39;DPUCZDX8G-zcu102&#39;, &#39;DPUCZDX8G-kv260&#39;</span>
</pre></div>
</div>
<p>The TVM with Vitis AI flow currently supports the DPU targets listed in
the table at the top of this page. Once the appropriate targets are defined,
we invoke the TVM compiler to build the graph for the specified target.</p>
<p><strong>Import the Model</strong></p>
<p>Example code to import an MXNet model:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">frontend</span><span class="o">.</span><span class="n">from_mxnet</span><span class="p">(</span><span class="n">block</span><span class="p">,</span> <span class="n">input_shape</span><span class="p">)</span>
</pre></div>
</div>
<p><strong>Partition the Model</strong></p>
<p>After importing the model, we utilize the Relay API to annotate the Relay expression for the provided DPU target and partition the graph.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">mod</span> <span class="o">=</span> <span class="n">partition_for_vitis_ai</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">dpu</span><span class="o">=</span><span class="n">dpu_target</span><span class="p">)</span>
</pre></div>
</div>
<p><strong>Build the Model</strong></p>
<p>The partitioned model is passed to the TVM compiler to generate the runtime libraries for the TVM Runtime.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">export_rt_mod_file</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">(),</span> <span class="s1">&#39;vitis_ai.rtmod&#39;</span><span class="p">)</span>
<span class="n">build_options</span> <span class="o">=</span> <span class="p">{</span>
    <span class="s1">&#39;dpu&#39;</span><span class="p">:</span> <span class="n">dpu_target</span><span class="p">,</span>
    <span class="s1">&#39;export_runtime_module&#39;</span><span class="p">:</span> <span class="n">export_rt_mod_file</span>
<span class="p">}</span>
<span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">transform</span><span class="o">.</span><span class="n">PassContext</span><span class="p">(</span><span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">config</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;relay.ext.vitis_ai.options&#39;</span><span class="p">:</span> <span class="n">build_options</span><span class="p">}):</span>
    <span class="n">lib</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">tvm_target</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">)</span>
</pre></div>
</div>
<p><strong>Quantize the Model</strong></p>
<p>Usually, to be able to accelerate inference of Neural Network models
with Vitis AI DPU accelerators, those models need to be quantized upfront.
In TVM - Vitis AI flow, we make use of on-the-fly quantization to remove
this additional preprocessing step. In this flow, one doesn’t need to
quantize his/her model upfront but can make use of the typical inference
execution calls (module.run) to quantize the model on-the-fly using the
first N inputs that are provided (see more information below). This will
set up and calibrate the Vitis-AI DPU and from that point onwards
inference will be accelerated for all next inputs. Note that the edge
flow deviates slightly from the explained flow in that inference won’t
be accelerated after the first N inputs but the model will have been
quantized and compiled and can be moved to the edge device for
deployment. Please check out the <a class="reference external" href="#running-on-zynq">Running on Zynq</a>
section below for more information.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">module</span> <span class="o">=</span> <span class="n">graph_executor</span><span class="o">.</span><span class="n">GraphModule</span><span class="p">(</span><span class="n">lib</span><span class="p">[</span><span class="s2">&quot;default&quot;</span><span class="p">](</span><span class="n">tvm</span><span class="o">.</span><span class="n">cpu</span><span class="p">()))</span>

<span class="c1"># First N (default = 128) inputs are used for quantization calibration and will</span>
<span class="c1"># be executed on the CPU</span>
<span class="c1"># This config can be changed by setting the &#39;PX_QUANT_SIZE&#39; (e.g. export PX_QUANT_SIZE=64)</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">128</span><span class="p">):</span>
   <span class="n">module</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="n">input_name</span><span class="p">,</span> <span class="n">inputs</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
   <span class="n">module</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
</pre></div>
</div>
<p>By default, the number of images used for quantization is set to 128.
You could change the number of images used for On-The-Fly Quantization
with the PX_QUANT_SIZE environment variable. For example, execute the
following line in the terminal before calling the compilation script
to reduce the quantization calibration dataset to eight images.
This can be used for quick testing.</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="nb">export</span> <span class="nv">PX_QUANT_SIZE</span><span class="o">=</span><span class="m">8</span>
</pre></div>
</div>
<p>Lastly, we store the compiled output from the TVM compiler on disk for
running the model on the target device. This happens as follows for
cloud DPU’s (Alveo, VCK5000):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">lib_path</span> <span class="o">=</span> <span class="s2">&quot;deploy_lib.so&quot;</span>
<span class="n">lib</span><span class="o">.</span><span class="n">export_library</span><span class="p">(</span><span class="n">lib_path</span><span class="p">)</span>
</pre></div>
</div>
<p>For edge targets (Zynq, VCK190) we have to rebuild for aarch64. To do this
we first have to normally export the module to also serialize the Vitis AI
runtime module (vitis_ai.rtmod). We will load this runtime module again
afterwards to rebuild and export for aarch64.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">temp</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">tempdir</span><span class="p">()</span>
<span class="n">lib</span><span class="o">.</span><span class="n">export_library</span><span class="p">(</span><span class="n">temp</span><span class="o">.</span><span class="n">relpath</span><span class="p">(</span><span class="s2">&quot;tvm_lib.so&quot;</span><span class="p">))</span>

<span class="c1"># Build and export lib for aarch64 target</span>
<span class="n">tvm_target</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">target</span><span class="o">.</span><span class="n">arm_cpu</span><span class="p">(</span><span class="s1">&#39;ultra96&#39;</span><span class="p">)</span>
<span class="n">lib_kwargs</span> <span class="o">=</span> <span class="p">{</span>
   <span class="s1">&#39;fcompile&#39;</span><span class="p">:</span> <span class="n">contrib</span><span class="o">.</span><span class="n">cc</span><span class="o">.</span><span class="n">create_shared</span><span class="p">,</span>
   <span class="s1">&#39;cc&#39;</span><span class="p">:</span> <span class="s2">&quot;/usr/aarch64-linux-gnu/bin/ld&quot;</span>
<span class="p">}</span>

<span class="n">build_options</span> <span class="o">=</span> <span class="p">{</span>
    <span class="s1">&#39;load_runtime_module&#39;</span><span class="p">:</span> <span class="n">export_rt_mod_file</span>
<span class="p">}</span>
<span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">transform</span><span class="o">.</span><span class="n">PassContext</span><span class="p">(</span><span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">config</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;relay.ext.vitis_ai.options&#39;</span><span class="p">:</span> <span class="n">build_options</span><span class="p">}):</span>
     <span class="n">lib_edge</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">tvm_target</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">)</span>

<span class="n">lib_edge</span><span class="o">.</span><span class="n">export_library</span><span class="p">(</span><span class="s1">&#39;deploy_lib_edge.so&#39;</span><span class="p">,</span> <span class="o">**</span><span class="n">lib_kwargs</span><span class="p">)</span>
</pre></div>
</div>
<p>This concludes the tutorial to compile a model using TVM with Vitis AI.
For instructions on how to run a compiled model please refer to the next section.</p>
</div>
<div class="section" id="inference">
<h2>Inference<a class="headerlink" href="#inference" title="永久链接至标题">¶</a></h2>
<p>The TVM with Vitis AI flow contains two stages: Compilation and Inference.
During the compilation a user can choose to compile a model for any of the
target devices that are currently supported. Once a model is compiled, the
generated files can be used to run the model on a target device during the
Inference stage.</p>
<p>Check out the <a class="reference external" href="#running-on-alveo-and-vck5000">Running on Alveo and VCK5000</a>
and <a class="reference external" href="#running-on-zynq-and-vck190">Running on Zynq and VCK190</a> sections for
doing inference on cloud accelerator cards and edge boards, respectively.</p>
<div class="section" id="running-on-alveo-and-vck5000">
<h3>Running on Alveo and VCK5000<a class="headerlink" href="#running-on-alveo-and-vck5000" title="永久链接至标题">¶</a></h3>
<p>After having followed the steps in the <a class="reference external" href="#compiling-a-model">Compiling a Model</a>
section, you can continue running on new inputs inside the docker for accelerated
inference:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">module</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="n">input_name</span><span class="p">,</span> <span class="n">inputs</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
<span class="n">module</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
</pre></div>
</div>
<p>Alternatively, you can load the exported runtime module (the deploy_lib.so
exported in <a class="reference external" href="#compiling-a-model">Compiling a Model</a>):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">pyxir</span>
<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="kn">import</span> <span class="n">graph_executor</span>

<span class="n">dev</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span>

<span class="c1"># input_name = ...</span>
<span class="c1"># input_data = ...</span>

<span class="c1"># load the module into memory</span>
<span class="n">lib</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">runtime</span><span class="o">.</span><span class="n">load_module</span><span class="p">(</span><span class="s2">&quot;deploy_lib.so&quot;</span><span class="p">)</span>

<span class="n">module</span> <span class="o">=</span> <span class="n">graph_executor</span><span class="o">.</span><span class="n">GraphModule</span><span class="p">(</span><span class="n">lib</span><span class="p">[</span><span class="s2">&quot;default&quot;</span><span class="p">](</span><span class="n">dev</span><span class="p">))</span>
<span class="n">module</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="n">input_name</span><span class="p">,</span> <span class="n">input_data</span><span class="p">)</span>
<span class="n">module</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="section" id="running-on-zynq-and-vck190">
<h3>Running on Zynq and VCK190<a class="headerlink" href="#running-on-zynq-and-vck190" title="永久链接至标题">¶</a></h3>
<p>Before proceeding, please follow the <a class="reference external" href="#zynq-setup">Zynq</a> or
<a class="reference external" href="#versal-vck190-setup">Versal VCK190</a> setup instructions.</p>
<p>Prior to running a model on the board, you need to compile the model for
your target evaluation board and transfer the compiled model on to the board.
Please refer to the <a class="reference external" href="#compiling-a-model">Compiling a Model</a> section for
information on how to compile a model.</p>
<p>Afterwards, you will have to transfer the compiled model (deploy_lib_edge.so)
to the evaluation board. Then, on the board you can use the typical
“load_module” and “module.run” APIs to execute. For this, please make sure to
run the script as root (execute <code class="docutils literal notranslate"><span class="pre">su</span></code> in terminal to log into root).</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>Note also that you <strong>shouldn’t</strong> import the
PyXIR DPU targets in the run script (<code class="docutils literal notranslate"><span class="pre">import</span> <span class="pre">pyxir.contrib.target.DPUCZDX8G</span></code>).</p>
</div>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">pyxir</span>
<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="kn">import</span> <span class="n">graph_executor</span>

<span class="n">dev</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span>

<span class="c1"># input_name = ...</span>
<span class="c1"># input_data = ...</span>

<span class="c1"># load the module into memory</span>
<span class="n">lib</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">runtime</span><span class="o">.</span><span class="n">load_module</span><span class="p">(</span><span class="s2">&quot;deploy_lib_edge.so&quot;</span><span class="p">)</span>

<span class="n">module</span> <span class="o">=</span> <span class="n">graph_executor</span><span class="o">.</span><span class="n">GraphModule</span><span class="p">(</span><span class="n">lib</span><span class="p">[</span><span class="s2">&quot;default&quot;</span><span class="p">](</span><span class="n">dev</span><span class="p">))</span>
<span class="n">module</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="n">input_name</span><span class="p">,</span> <span class="n">input_data</span><span class="p">)</span>
<span class="n">module</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
</pre></div>
</div>
</div>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="bnns.html" class="btn btn-neutral float-right" title="Relay BNNS Integration" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="tensorrt.html" class="btn btn-neutral float-left" title="Relay TensorRT Integration" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../_static//img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>