





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>使用microTVM部署TFLite模型 &mdash; tvm 0.8.dev1982 文档</title>

  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <!-- NOTE(review): theme.css, pygments.css and documentation_options.js were each
       included twice in the generated output; the duplicates are removed so every
       asset is fetched once. Redundant type="text/css"/"text/javascript" dropped. -->
  <link rel="stylesheet" href="../../_static/css/theme.css">
  <link rel="stylesheet" href="../../_static/pygments.css">
  <link rel="stylesheet" href="../../_static/gallery.css">
  <link rel="stylesheet" href="../../_static/css/tlcpack_theme.css">

  <link rel="shortcut icon" href="../../_static/tvm-logo-square.png">

  <script id="documentation_options" data-url_root="../../" src="../../_static/documentation_options.js"></script>
  <script src="../../_static/jquery.js"></script>
  <script src="../../_static/underscore.js"></script>
  <script src="../../_static/doctools.js"></script>
  <script src="../../_static/translations.js"></script>
  <script src="../../_static/js/theme.js"></script>
  <script src="../../_static/js/tlcpack_theme.js"></script>

  <link rel="index" title="索引" href="../../genindex.html">
  <link rel="search" title="搜索" href="../../search.html">
  <link rel="next" title="Extend TVM" href="../extend_tvm/index.html">
  <link rel="prev" title="microTVM 虚拟机" href="micro_reference_vm.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <!-- NOTE(review): all previously unquoted src/href attribute values
                    below are now quoted, per HTML best practice. -->
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <!-- Sidebar header: logo link, docs version badge, and search form. -->
        <div class="wy-side-nav-search">
          <a href="../../index.html">
            <img src="../../_static/tvm-logo-small.png" class="logo" alt="Logo">
          </a>

          <div class="version">
            0.8.dev1982
          </div>

          <div role="search">
            <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
              <!-- placeholder alone is not an accessible name; aria-label added -->
              <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
              <input type="hidden" name="check_keywords" value="yes">
              <input type="hidden" name="area" value="default">
            </form>
          </div>
        </div>

        
        <!-- Sphinx toctree sidebar: same links, captions and current-page
             highlighting as the generated output, re-emitted with consistent
             indentation and without the generator's stray blank lines. -->
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../install/index.html">安装 TVM</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../contribute/index.html">贡献者指南</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
          <ul class="current">
            <li class="toctree-l1"><a class="reference internal" href="../../tutorial/index.html">User Tutorial</a></li>
            <li class="toctree-l1 current"><a class="reference internal" href="../index.html">How To Guides</a>
              <ul class="current">
                <li class="toctree-l2"><a class="reference internal" href="../compile_models/index.html">编译深度学习模型</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../deploy/index.html">TVM 部署模型和集成</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../work_with_relay/index.html">Work With Relay</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../work_with_schedules/index.html">Work With Tensor Expression and Schedules</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../optimize_operators/index.html">优化张量算子</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../tune_with_autotvm/index.html">Auto-Tune with Templates and AutoTVM</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../tune_with_autoscheduler/index.html">Use AutoScheduler for Template-Free Scheduling</a></li>
                <li class="toctree-l2 current"><a class="reference internal" href="index.html">Work With microTVM</a>
                  <ul class="current">
                    <li class="toctree-l3"><a class="reference internal" href="micro_autotune.html">Autotuning with micro TVM</a></li>
                    <li class="toctree-l3"><a class="reference internal" href="micro_reference_vm.html">microTVM 虚拟机</a></li>
                    <li class="toctree-l3 current"><a class="current reference internal" href="#">使用microTVM部署TFLite模型</a>
                      <ul>
                        <li class="toctree-l4"><a class="reference internal" href="#setup">设置</a></li>
                        <li class="toctree-l4"><a class="reference internal" href="#load-and-prepare-the-pre-trained-model">加载并准备预训练模型</a></li>
                        <li class="toctree-l4"><a class="reference internal" href="#defining-the-target">定义目标设备</a></li>
                      </ul>
                    </li>
                  </ul>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../extend_tvm/index.html">Extend TVM</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../profile/index.html">Profile Models</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../../errors.html">Handle TVM Errors</a></li>
                <li class="toctree-l2"><a class="reference internal" href="../../faq.html">常见提问</a></li>
              </ul>
            </li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../dev/tutorial/index.html">Developer Tutorial</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../dev/how_to/how_to.html">开发者指南</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../arch/index.html">Design and Architecture</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../topic/microtvm/index.html">microTVM：裸机使用TVM</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../topic/vta/index.html">VTA: Versatile Tensor Accelerator</a></li>
          </ul>
          <p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../reference/langref/index.html">语言参考</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../reference/api/python/index.html">Python API</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../reference/api/links.html">Other APIs</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../reference/publications.html">Publications</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../genindex.html">索引</a></li>
          </ul>
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              <!-- tvm -->
              Table of content
            </div>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="../../index.html">Docs</a> <span class="br-arrow">></span></li>
    <li><a href="../index.html">How To Guides</a> <span class="br-arrow">></span></li>
    <li><a href="index.html">Work With microTVM</a> <span class="br-arrow">></span></li>
    <li>使用microTVM部署TFLite模型</li>
    <li class="wy-breadcrumbs-aside">
      <!-- fixed doubled slash in icon path: _static//img -> _static/img -->
      <a href="../../_sources/how_to/work_with_microtvm/micro_tflite.rst.txt" rel="nofollow"> <img src="../../_static/img/source.svg" alt="viewsource"></a>
    </li>
  </ul>
  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="sphx-glr-download-link-note admonition note">
<p class="admonition-title">注解</p>
<p>点击 <a class="reference internal" href="#sphx-glr-download-how-to-work-with-microtvm-micro-tflite-py"><span class="std std-ref">这里</span></a> 下载完整的样例代码</p>
</div>
<div class="sphx-glr-example-title section" id="microtvm-with-tflite-models">
<span id="microtvm-with-tflite"></span><span id="sphx-glr-how-to-work-with-microtvm-micro-tflite-py"></span><h1>使用microTVM部署TFLite模型<a class="headerlink" href="#microtvm-with-tflite-models" title="永久链接至标题">¶</a></h1>
<p><strong>作者</strong>: <a class="reference external" href="https://github.com/tom-gall">Tom Gall</a></p>
<p>本教程介绍了如何使用Relay模块和microTVM部署并运行一个TFLite模型。</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>如果您想在microTVM虚拟机上运行此教程，请使用此页面底部的链接下载 Jupyter notebook，并将其保存到 TVM 目录中。然后：</p>
<ol class="arabic">
<li><p>使用下面的 <code class="docutils literal notranslate"><span class="pre">vagrant</span> <span class="pre">ssh</span></code> 命令登录microTVM虚拟机：</p>
<blockquote>
<div><p><code class="docutils literal notranslate"><span class="pre">$</span> <span class="pre">vagrant</span> <span class="pre">ssh</span> <span class="pre">--</span> <span class="pre">-L8888:localhost:8888</span></code></p>
</div></blockquote>
</li>
<li><p>安装 jupyter: <code class="docutils literal notranslate"><span class="pre">pip</span> <span class="pre">install</span> <span class="pre">jupyterlab</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">cd</span></code> 到 TVM 根目录</p></li>
<li><p>安装 tflite： <code class="docutils literal notranslate"><span class="pre">poetry</span> <span class="pre">install</span> <span class="pre">-E</span> <span class="pre">importer-tflite</span></code></p></li>
<li><p>启动 Jupyter Notebook： <code class="docutils literal notranslate"><span class="pre">jupyter</span> <span class="pre">notebook</span></code></p></li>
<li><p>复制该命令行显示的 URL，并将其粘贴到您主机的浏览器中。</p></li>
<li><p>通过浏览器显示的目录点击到已保存的 Jupyter Notebook （ <code class="docutils literal notranslate"><span class="pre">.ipynb</span></code> 文件）</p></li>
</ol>
</div>
<div class="section" id="setup">
<h2>设置<a class="headerlink" href="#setup" title="永久链接至标题">¶</a></h2>
<div class="section" id="install-tflite">
<h3>安装 TFLite<a class="headerlink" href="#install-tflite" title="永久链接至标题">¶</a></h3>
<p>首先，TFLite 是必要的软件依赖库。您可以通过以下两种方式进行安装：</p>
<ol class="arabic">
<li><p>通过 <code class="docutils literal notranslate"><span class="pre">pip</span></code> 命令安装 tflite</p>
<blockquote>
<div><div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>pip install <span class="nv">tflite</span><span class="o">=</span><span class="m">2</span>.1.0 --user
</pre></div>
</div>
</div></blockquote>
</li>
<li><p>自己生成 TFLite 库。步骤如下：</p>
<blockquote>
<div><p>获取 flatc 编译器。有关详细信息，请参阅 <a class="reference external" href="https://github.com/google/flatbuffers">https://github.com/google/flatbuffers</a> ，并通过下面的命令来确保 flatc 已正确安装。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>flatc --version
</pre></div>
</div>
<p>获取 TFLite schema。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
</pre></div>
</div>
<p>生成 TFLite 库。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>flatc --python schema.fbs
</pre></div>
</div>
<p>将当前文件夹（包含生成的 tflite 模块）添加到 PYTHONPATH 中。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="nb">export</span> <span class="nv">PYTHONPATH</span><span class="o">=</span><span class="si">${</span><span class="nv">PYTHONPATH</span><span class="p">:+</span><span class="nv">$PYTHONPATH</span><span class="p">:</span><span class="si">}</span><span class="k">$(</span><span class="nb">pwd</span><span class="k">)</span>
</pre></div>
</div>
</div></blockquote>
</li>
</ol>
<p>使用 <code class="docutils literal notranslate"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">&quot;import</span> <span class="pre">tflite&quot;</span></code> 来验证TFLite库是否正确安装</p>
</div>
<div class="section" id="install-zephyr-physical-hardware-only">
<h3>安装 Zephyr（仅限物理硬件）<a class="headerlink" href="#install-zephyr-physical-hardware-only" title="永久链接至标题">¶</a></h3>
<p>当您使用主机模拟器（默认值）运行此教程时，您可以使用主机上的 <code class="docutils literal notranslate"><span class="pre">gcc</span></code> 命令来构建模拟设备的固件（firmware image）。但是当您需要为物理硬件进行编译时，您需要安装 <em>工具链</em> 和一些特定于目标设备的软件依赖。microTVM 允许您使用任何可以启动 TVM RPC 服务的编译器和 runtime，此教程选择使用 Zephyr RTOS 来提供这些必要的工具和软件依赖。</p>
<p>您可以按照 <a class="reference external" href="https://docs.zephyrproject.org/latest/getting_started/index.html">安装说明</a> 安装 Zephyr 。（译者注：如果您是通过microTVM虚拟机运行该教程，该步骤可以忽略，因为microTVM虚拟机中已经包含了 Zephyr OS ）</p>
<dl>
<dt>另外：重新创建您自己的预训练 TFLite 模型</dt><dd><p>该教程将会下载一个预训练TFLite模型。当使用微控制器时，您需要注意，这些设备的资源非常紧张，以致于 MobileNet 等标准模型可能无法在这些设备上运行。</p>
<p>对于此教程，我们将使用一个 TF Micro 示例模型。</p>
<p>如果您想复现训练模型的步骤，请参阅： <a class="reference external" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train">https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train</a></p>
<blockquote>
<div><div class="admonition note">
<p class="admonition-title">注解</p>
<p>如果您从下面的地址下载了示例模型：</p>
<p><code class="docutils literal notranslate"><span class="pre">wget</span> <span class="pre">https://storage.googleapis.com/download.tensorflow.org/models/tflite/micro/hello_world_2020_04_13.zip</span></code></p>
<p>本教程会因为未实现的操作码（114）而无法正常运行</p>
</div>
</div></blockquote>
</dd>
</dl>
</div>
</div>
<div class="section" id="load-and-prepare-the-pre-trained-model">
<h2>加载并准备预训练模型<a class="headerlink" href="#load-and-prepare-the-pre-trained-model" title="永久链接至标题">¶</a></h2>
<p>将预训练的 TFLite 模型从目录中的文件加载到内存缓冲中</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">import</span> <span class="nn">logging</span>

<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">import</span> <span class="nn">tvm.micro</span> <span class="k">as</span> <span class="nn">micro</span>
<span class="kn">from</span> <span class="nn">tvm.contrib.download</span> <span class="k">import</span> <span class="n">download_testdata</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="k">import</span> <span class="n">graph_executor</span><span class="p">,</span> <span class="n">utils</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">relay</span>

<span class="n">model_url</span> <span class="o">=</span> <span class="s2">&quot;https://people.linaro.org/~tom.gall/sine_model.tflite&quot;</span>
<span class="n">model_file</span> <span class="o">=</span> <span class="s2">&quot;sine_model.tflite&quot;</span>
<span class="n">model_path</span> <span class="o">=</span> <span class="n">download_testdata</span><span class="p">(</span><span class="n">model_url</span><span class="p">,</span> <span class="n">model_file</span><span class="p">,</span> <span class="n">module</span><span class="o">=</span><span class="s2">&quot;data&quot;</span><span class="p">)</span>

<span class="n">tflite_model_buf</span> <span class="o">=</span> <span class="nb">open</span><span class="p">(</span><span class="n">model_path</span><span class="p">,</span> <span class="s2">&quot;rb&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">read</span><span class="p">()</span>
</pre></div>
</div>
<p>将缓冲转换成一个包含tflite模型的python对象</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">try</span><span class="p">:</span>
    <span class="kn">import</span> <span class="nn">tflite</span>

    <span class="n">tflite_model</span> <span class="o">=</span> <span class="n">tflite</span><span class="o">.</span><span class="n">Model</span><span class="o">.</span><span class="n">GetRootAsModel</span><span class="p">(</span><span class="n">tflite_model_buf</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
<span class="k">except</span> <span class="ne">AttributeError</span><span class="p">:</span>
    <span class="kn">import</span> <span class="nn">tflite.Model</span>

    <span class="n">tflite_model</span> <span class="o">=</span> <span class="n">tflite</span><span class="o">.</span><span class="n">Model</span><span class="o">.</span><span class="n">Model</span><span class="o">.</span><span class="n">GetRootAsModel</span><span class="p">(</span><span class="n">tflite_model_buf</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
</pre></div>
</div>
<p>打印模型的版本号</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">version</span> <span class="o">=</span> <span class="n">tflite_model</span><span class="o">.</span><span class="n">Version</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Model Version: &quot;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">version</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>Model Version: 3
</pre></div>
</div>
<p>将包含模型的python对象解析成 relay 模块和权重。 <strong>请注意：</strong>  <code class="docutils literal notranslate"><span class="pre">input_tensor</span></code> 的名称必须与模型中输入张量的名称保持一致。</p>
<p>如果您无法确定模型的输入张量名称，您可以通过Tensorflow工程内部自带的 <code class="docutils literal notranslate"><span class="pre">visualize.py</span></code> 脚本来确定它。详情请参阅 <a class="reference external" href="https://www.tensorflow.org/lite/guide/faq">如何察看一个 .tflite 文件？</a></p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">input_tensor</span> <span class="o">=</span> <span class="s2">&quot;dense_4_input&quot;</span>
<span class="n">input_shape</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span><span class="p">,)</span>
<span class="n">input_dtype</span> <span class="o">=</span> <span class="s2">&quot;float32&quot;</span>

<span class="n">mod</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">frontend</span><span class="o">.</span><span class="n">from_tflite</span><span class="p">(</span>
    <span class="n">tflite_model</span><span class="p">,</span> <span class="n">shape_dict</span><span class="o">=</span><span class="p">{</span><span class="n">input_tensor</span><span class="p">:</span> <span class="n">input_shape</span><span class="p">},</span> <span class="n">dtype_dict</span><span class="o">=</span><span class="p">{</span><span class="n">input_tensor</span><span class="p">:</span> <span class="n">input_dtype</span><span class="p">}</span>
<span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="defining-the-target">
<h2>定义目标设备<a class="headerlink" href="#defining-the-target" title="永久链接至标题">¶</a></h2>
<p>现在，我们需要为 relay 构建一个配置文件，关闭两个 <code class="docutils literal notranslate"><span class="pre">pass</span></code> 选项，然后调用 <code class="docutils literal notranslate"><span class="pre">relay.build</span></code> 来生成 TARGET 目标设备的 C 语言源代码。当运行此 Python 脚本的主机与待模拟目标设备的体系结构相同时，请您将 TARGET 设置为 “host”，并选择一个合适的 board/虚拟机 来运行脚本（Zephyr 会根据 BOARD 创建一个合适的 QEMU 虚拟机）。本例程选择使用 x86 体系结构和一个 x86 虚拟机：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">TARGET</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">target</span><span class="o">.</span><span class="n">target</span><span class="o">.</span><span class="n">micro</span><span class="p">(</span><span class="s2">&quot;host&quot;</span><span class="p">)</span>
<span class="n">BOARD</span> <span class="o">=</span> <span class="s2">&quot;qemu_x86&quot;</span>
<span class="c1">#</span>
<span class="c1"># Compiling for physical hardware</span>
<span class="c1">#  When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. The</span>
<span class="c1">#  STM32F746 Nucleo target and board is chosen in the example below. Another option would be to</span>
<span class="c1">#  choose the STM32F746 Discovery board instead. Since that board has the same MCU as the Nucleo</span>
<span class="c1">#  board but a couple of wirings and configs differ, it&#39;s necessary to select the &quot;stm32f746g_disco&quot;</span>
<span class="c1">#  board to generate the right firmware image.</span>
<span class="c1">#</span>
<span class="c1">#  TARGET = tvm.target.target.micro(&quot;stm32f746xx&quot;)</span>
<span class="c1">#  BOARD = &quot;nucleo_f746zg&quot; # or &quot;stm32f746g_disco#&quot;</span>
<span class="c1">#</span>
<span class="c1">#  For some boards, Zephyr runs them emulated by default, using QEMU. For example, below is the</span>
<span class="c1">#  TARGET and BOARD used to build a microTVM firmware for the mps2-an521 board. Since that board</span>
<span class="c1">#  runs emulated by default on Zephyr the suffix &quot;-qemu&quot; is added to the board name to inform</span>
<span class="c1">#  microTVM that the QEMU transporter must be used to communicate with the board. If the board name</span>
<span class="c1">#  already has the prefix &quot;qemu_&quot;, like &quot;qemu_x86&quot;, then it&#39;s not necessary to add that suffix.</span>
<span class="c1">#</span>
<span class="c1">#  TARGET = tvm.target.target.micro(&quot;mps2_an521&quot;)</span>
<span class="c1">#  BOARD = &quot;mps2_an521-qemu&quot;</span>
</pre></div>
</div>
<p>现在，为目标设备编译模型：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">transform</span><span class="o">.</span><span class="n">PassContext</span><span class="p">(</span>
    <span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">config</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;tir.disable_vectorize&quot;</span><span class="p">:</span> <span class="kc">True</span><span class="p">},</span> <span class="n">disabled_pass</span><span class="o">=</span><span class="p">[</span><span class="s2">&quot;AlterOpLayout&quot;</span><span class="p">]</span>
<span class="p">):</span>
    <span class="n">module</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">target</span><span class="o">=</span><span class="n">TARGET</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">)</span>


<span class="c1"># Inspecting the compilation output</span>
<span class="c1"># ---------------------------------</span>
<span class="c1">#</span>
<span class="c1"># The compilation process has produced some C code implementing the operators in this graph. We</span>
<span class="c1"># can inspect it by printing the CSourceModule contents (for the purposes of this tutorial, let&#39;s</span>
<span class="c1"># just print the first 10 lines):</span>

<span class="n">c_source_module</span> <span class="o">=</span> <span class="n">module</span><span class="o">.</span><span class="n">get_lib</span><span class="p">()</span><span class="o">.</span><span class="n">imported_modules</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="k">assert</span> <span class="n">c_source_module</span><span class="o">.</span><span class="n">type_key</span> <span class="o">==</span> <span class="s2">&quot;c&quot;</span><span class="p">,</span> <span class="s2">&quot;tutorial is broken&quot;</span>

<span class="n">c_source_code</span> <span class="o">=</span> <span class="n">c_source_module</span><span class="o">.</span><span class="n">get_source</span><span class="p">()</span>
<span class="n">first_few_lines</span> <span class="o">=</span> <span class="n">c_source_code</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s2">&quot;</span><span class="se">\n</span><span class="s2">&quot;</span><span class="p">)[:</span><span class="mi">10</span><span class="p">]</span>
<span class="k">assert</span> <span class="nb">any</span><span class="p">(</span>
    <span class="n">l</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">&quot;TVM_DLL int32_t tvmgen_default_&quot;</span><span class="p">)</span> <span class="k">for</span> <span class="n">l</span> <span class="ow">in</span> <span class="n">first_few_lines</span>
<span class="p">),</span> <span class="n">f</span><span class="s2">&quot;tutorial is broken: </span><span class="si">{first_few_lines!r}</span><span class="s2">&quot;</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;</span><span class="se">\n</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">first_few_lines</span><span class="p">))</span>


<span class="c1"># Compiling the generated code</span>
<span class="c1"># ----------------------------</span>
<span class="c1">#</span>
<span class="c1"># Now we need to incorporate the generated C code into a project that allows us to run inference on the</span>
<span class="c1"># device. The simplest way to do this is to integrate it yourself, using microTVM&#39;s standard output format</span>
<span class="c1"># (:doc:`Model Library Format` &lt;/dev/model_library_format&gt;`). This is a tarball with a standard layout:</span>

<span class="c1"># Get a temporary path where we can store the tarball (since this is running as a tutorial).</span>
<span class="kn">import</span> <span class="nn">tempfile</span>

<span class="n">fd</span><span class="p">,</span> <span class="n">model_library_format_tar_path</span> <span class="o">=</span> <span class="n">tempfile</span><span class="o">.</span><span class="n">mkstemp</span><span class="p">()</span>
<span class="n">os</span><span class="o">.</span><span class="n">close</span><span class="p">(</span><span class="n">fd</span><span class="p">)</span>
<span class="n">os</span><span class="o">.</span><span class="n">unlink</span><span class="p">(</span><span class="n">model_library_format_tar_path</span><span class="p">)</span>
<span class="n">tvm</span><span class="o">.</span><span class="n">micro</span><span class="o">.</span><span class="n">export_model_library_format</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">model_library_format_tar_path</span><span class="p">)</span>

<span class="kn">import</span> <span class="nn">tarfile</span>

<span class="k">with</span> <span class="n">tarfile</span><span class="o">.</span><span class="n">open</span><span class="p">(</span><span class="n">model_library_format_tar_path</span><span class="p">,</span> <span class="s2">&quot;r:*&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">tar_f</span><span class="p">:</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;</span><span class="se">\n</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">f</span><span class="s2">&quot; - </span><span class="si">{m.name}</span><span class="s2">&quot;</span> <span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">tar_f</span><span class="o">.</span><span class="n">getmembers</span><span class="p">()))</span>

<span class="c1"># Cleanup for tutorial:</span>
<span class="n">os</span><span class="o">.</span><span class="n">unlink</span><span class="p">(</span><span class="n">model_library_format_tar_path</span><span class="p">)</span>


<span class="c1"># TVM also provides a standard way for embedded platforms to automatically generate a standalone</span>
<span class="c1"># project, compile and flash it to a target, and communicate with it using the standard TVM RPC</span>
<span class="c1"># protocol. The Model Library Format serves as the model input to this process. When embedded</span>
<span class="c1"># platforms provide such an integration, they can be used directly by TVM for both host-driven</span>
<span class="c1"># inference and autotuning . This integration is provided by the</span>
<span class="c1"># `microTVM Project API` &lt;https://github.com/apache/tvm-rfcs/blob/main/rfcs/0008-microtvm-project-api.md&gt;_,</span>
<span class="c1">#</span>
<span class="c1"># Embedded platforms need to provide a Template Project containing a microTVM API Server (typically,</span>
<span class="c1"># this lives in a file ``microtvm_api_server.py`` in the root directory). Let&#39;s use the example ``host``</span>
<span class="c1"># project in this tutorial, which simulates the device using a POSIX subprocess and pipes:</span>

<span class="kn">import</span> <span class="nn">subprocess</span>
<span class="kn">import</span> <span class="nn">pathlib</span>

<span class="n">repo_root</span> <span class="o">=</span> <span class="n">pathlib</span><span class="o">.</span><span class="n">Path</span><span class="p">(</span>
    <span class="n">subprocess</span><span class="o">.</span><span class="n">check_output</span><span class="p">([</span><span class="s2">&quot;git&quot;</span><span class="p">,</span> <span class="s2">&quot;rev-parse&quot;</span><span class="p">,</span> <span class="s2">&quot;--show-toplevel&quot;</span><span class="p">],</span> <span class="n">encoding</span><span class="o">=</span><span class="s2">&quot;utf-8&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">strip</span><span class="p">()</span>
<span class="p">)</span>
<span class="n">template_project_path</span> <span class="o">=</span> <span class="n">repo_root</span> <span class="o">/</span> <span class="s2">&quot;src&quot;</span> <span class="o">/</span> <span class="s2">&quot;runtime&quot;</span> <span class="o">/</span> <span class="s2">&quot;crt&quot;</span> <span class="o">/</span> <span class="s2">&quot;host&quot;</span>
<span class="n">project_options</span> <span class="o">=</span> <span class="p">{}</span>  <span class="c1"># You can use options to provide platform-specific options through TVM.</span>

<span class="c1"># Compiling for physical hardware (or an emulated board, like the mps_an521)</span>
<span class="c1"># --------------------------------------------------------------------------</span>
<span class="c1">#  For physical hardware, you can try out the Zephyr platform by using a different template project</span>
<span class="c1">#  and options:</span>
<span class="c1">#</span>
<span class="c1">#     template_project_path = repo_root / &quot;apps&quot; / &quot;microtvm&quot; / &quot;zephyr&quot; / &quot;template_project&quot;</span>
<span class="c1">#     project_options = {&quot;project_type&quot;: &quot;host_driven&quot;, zephyr_board&quot;: &quot;nucleo_f746zg&quot;}}</span>

<span class="c1"># Create a temporary directory</span>
<span class="kn">import</span> <span class="nn">tvm.contrib.utils</span>

<span class="n">temp_dir</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">contrib</span><span class="o">.</span><span class="n">utils</span><span class="o">.</span><span class="n">tempdir</span><span class="p">()</span>
<span class="n">generated_project_dir</span> <span class="o">=</span> <span class="n">temp_dir</span> <span class="o">/</span> <span class="s2">&quot;generated-project&quot;</span>
<span class="n">generated_project</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">micro</span><span class="o">.</span><span class="n">generate_project</span><span class="p">(</span>
    <span class="n">template_project_path</span><span class="p">,</span> <span class="n">module</span><span class="p">,</span> <span class="n">generated_project_dir</span><span class="p">,</span> <span class="n">project_options</span>
<span class="p">)</span>

<span class="c1"># Build and flash the project</span>
<span class="n">generated_project</span><span class="o">.</span><span class="n">build</span><span class="p">()</span>
<span class="n">generated_project</span><span class="o">.</span><span class="n">flash</span><span class="p">()</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>// tvm target: c -keys=cpu -link-params=0 -model=host -runtime=c -system-lib=1
#define TVM_EXPORTS
#include &quot;tvm/runtime/c_runtime_api.h&quot;
#include &quot;tvm/runtime/c_backend_api.h&quot;
#include &lt;math.h&gt;
#ifdef __cplusplus
extern &quot;C&quot;
#endif
TVM_DLL int32_t tvmgen_default_fused_nn_dense_add(void* args, void* arg_type_ids, int32_t num_args, void* out_ret_value, void* out_ret_tcode, void* resource_handle) {
  void* arg0 = (((TVMValue*)args)[0].v_handle);
 - .
 - ./src
 - ./src/relay.txt
 - ./parameters
 - ./parameters/default.params
 - ./codegen
 - ./codegen/host
 - ./codegen/host/src
 - ./codegen/host/src/default_lib0.c
 - ./codegen/host/src/default_lib1.c
 - ./metadata.json
 - ./executor-config
 - ./executor-config/graph
 - ./executor-config/graph/graph.json
</pre></div>
</div>
<p>接下来，使用模拟设备建立会话并运行计算。一般情况下， <code class="docutils literal notranslate"><span class="pre">with</span> <span class="pre">session</span></code> 代码行会将固件烧录到微控制器中，但在本教程中，它仅启动了一个等待微控制器连接的子进程。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">with</span> <span class="n">tvm</span><span class="o">.</span><span class="n">micro</span><span class="o">.</span><span class="n">Session</span><span class="p">(</span><span class="n">transport_context_manager</span><span class="o">=</span><span class="n">generated_project</span><span class="o">.</span><span class="n">transport</span><span class="p">())</span> <span class="k">as</span> <span class="n">session</span><span class="p">:</span>
    <span class="n">graph_mod</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">micro</span><span class="o">.</span><span class="n">create_local_graph_executor</span><span class="p">(</span>
        <span class="n">module</span><span class="o">.</span><span class="n">get_graph_json</span><span class="p">(),</span> <span class="n">session</span><span class="o">.</span><span class="n">get_system_lib</span><span class="p">(),</span> <span class="n">session</span><span class="o">.</span><span class="n">device</span>
    <span class="p">)</span>

    <span class="c1"># Set the model parameters using the lowered parameters produced by `relay.build`.</span>
    <span class="n">graph_mod</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="o">**</span><span class="n">module</span><span class="o">.</span><span class="n">get_params</span><span class="p">())</span>

    <span class="c1"># The model consumes a single float32 value and returns a predicted sine value.  To pass the</span>
    <span class="c1"># input value we construct a tvm.nd.array object with a single contrived number as input. For</span>
    <span class="c1"># this model values of 0 to 2Pi are acceptable.</span>
    <span class="n">graph_mod</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="n">input_tensor</span><span class="p">,</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.5</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="s2">&quot;float32&quot;</span><span class="p">)))</span>
    <span class="n">graph_mod</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>

    <span class="n">tvm_output</span> <span class="o">=</span> <span class="n">graph_mod</span><span class="o">.</span><span class="n">get_output</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;result is: &quot;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">tvm_output</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>result is: [[0.4443792]]
</pre></div>
</div>
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-how-to-work-with-microtvm-micro-tflite-py">
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../_downloads/2fb9ae7bf124f72614a43137cf2919cb/micro_tflite.py"><code class="xref download docutils literal notranslate"><span class="pre">下载</span> <span class="pre">Python</span> <span class="pre">源代码:</span> <span class="pre">micro_tflite.py</span></code></a></p>
</div>
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../_downloads/5b279d8a8718816263fa65b0eef1a5c0/micro_tflite.ipynb"><code class="xref download docutils literal notranslate"><span class="pre">下载</span> <span class="pre">Jupyter</span> <span class="pre">notebook:</span> <span class="pre">micro_tflite.ipynb</span></code></a></p>
</div>
</div>
<p class="sphx-glr-signature"><a class="reference external" href="https://sphinx-gallery.github.io">Gallery generated by Sphinx-Gallery</a></p>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../extend_tvm/index.html" class="btn btn-neutral float-right" title="Extend TVM" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="micro_reference_vm.html" class="btn btn-neutral float-left" title="microTVM 虚拟机" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../_static//img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All right reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  </body>
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>