





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN" >
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>使用TVMC编译和优化一个模型 &mdash; tvm 0.8.dev1982 文档</title>
  

  
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../_static/gallery.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tlcpack_theme.css" type="text/css" />

  
  
    <link rel="shortcut icon" href="../_static/tvm-logo-square.png"/>
  

  
  
  
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/translations.js"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    
    <script type="text/javascript" src="../_static/js/tlcpack_theme.js"></script>
    <link rel="index" title="索引" href="../genindex.html" />
    <link rel="search" title="搜索" href="../search.html" />
    <link rel="next" title="Compiling and Optimizing a Model with the Python Interface (AutoTVM)" href="autotvm_relay_x86.html" />
    <link rel="prev" title="安装 TVM" href="install.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html">
          

          
            
            <img src="../_static/tvm-logo-small.png" class="logo" alt="Logo"/>
          
          </a>

          
            
            
                <div class="version">
                  0.8.dev1982
                </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../install/index.html">安装 TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contribute/index.html">贡献者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
<ul class="current">
<li class="toctree-l1 current"><a class="reference internal" href="index.html">User Tutorial</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="introduction.html">介绍</a></li>
<li class="toctree-l2"><a class="reference internal" href="introduction.html#an-overview-of-tvm-and-model-optimization">TVM和模型优化的概述</a></li>
<li class="toctree-l2"><a class="reference internal" href="install.html">安装 TVM</a></li>
<li class="toctree-l2 current"><a class="current reference internal" href="#">使用TVMC编译和优化一个模型</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#using-tvmc">使用TVMC</a></li>
<li class="toctree-l3"><a class="reference internal" href="#obtaining-the-model">获取模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="#compiling-an-onnx-model-to-the-tvm-runtime">编译一个ONNX模型到TVM运行时</a></li>
<li class="toctree-l3"><a class="reference internal" href="#running-the-model-from-the-compiled-module-with-tvmc">使用TVMC运行编译好的模型</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#input-pre-processing">输入前处理</a></li>
<li class="toctree-l4"><a class="reference internal" href="#running-the-compiled-module">运行编译好的模块</a></li>
<li class="toctree-l4"><a class="reference internal" href="#output-post-processing">输出后处理</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="#automatically-tuning-the-resnet-model">自动调整ResNet模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="#compiling-an-optimized-model-with-tuning-data">使用调优数据编译一个优化后的模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="#comparing-the-tuned-and-untuned-models">比较调优和未调优的模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="#final-remarks">结语</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="autotvm_relay_x86.html">Compiling and Optimizing a Model with the Python Interface (AutoTVM)</a></li>
<li class="toctree-l2"><a class="reference internal" href="tensor_expr_get_started.html">使用张量表达式来处理运算符</a></li>
<li class="toctree-l2"><a class="reference internal" href="autotvm_matmul_x86.html">Optimizing Operators with Schedule Templates and AutoTVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="auto_scheduler_matmul_x86.html">Optimizing Operators with Auto-scheduling</a></li>
<li class="toctree-l2"><a class="reference internal" href="cross_compilation_and_rpc.html">Cross Compilation and RPC</a></li>
<li class="toctree-l2"><a class="reference internal" href="relay_quick_start.html">编译深度学习模型的快速开始教程</a></li>
<li class="toctree-l2"><a class="reference internal" href="intro_topi.html">Introduction to TOPI</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../how_to/index.html">How To Guides</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../dev/tutorial/index.html">Developer Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../dev/how_to/how_to.html">开发者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../arch/index.html">Design and Architecture</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../topic/microtvm/index.html">microTVM：裸机使用TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../topic/vta/index.html">VTA: Versatile Tensor Accelerator</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../reference/langref/index.html">语言参考</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/api/python/index.html">Python API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/api/links.html">Other APIs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../genindex.html">索引</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              <!-- tvm -->
              Table of contents
            </div>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html">Docs</a> <span class="br-arrow">></span></li>
        
          <li><a href="index.html">User Tutorial</a> <span class="br-arrow">></span></li>
        
      <li>使用TVMC编译和优化一个模型</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
            <a href="../_sources/tutorial/tvmc_command_line_driver.rst.txt" rel="nofollow"> <img src="../_static/img/source.svg" alt="viewsource"/></a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="sphx-glr-download-link-note admonition note">
<p class="admonition-title">注解</p>
<p>点击 <a class="reference internal" href="#sphx-glr-download-tutorial-tvmc-command-line-driver-py"><span class="std std-ref">这里</span></a> 下载完整的样例代码</p>
</div>
<div class="sphx-glr-example-title section" id="compiling-and-optimizing-a-model-with-tvmc">
<span id="sphx-glr-tutorial-tvmc-command-line-driver-py"></span><h1>使用TVMC编译和优化一个模型<a class="headerlink" href="#compiling-and-optimizing-a-model-with-tvmc" title="永久链接至标题">¶</a></h1>
<p><strong>作者</strong>: <a class="reference external" href="https://github.com/leandron">Leandro Nunes</a>, <a class="reference external" href="https://github.com/mbaret">Matthew Barrett</a>, <a class="reference external" href="https://github.com/hogepodge">Chris Hoge</a></p>
<p>在这个教程中，我们将使用TVMC（TVM命令行驱动程序）。TVMC是一个将TVM的一些特性比如自动调优，编译，分析和运行模型等通过命令行前端暴露出来的工具。</p>
<p>在本教程中，我们会基于TVMC完成以下任务：</p>
<ul class="simple">
<li><p>将一个预训练的ResNet50 V2模型编译为TVM运行时。</p></li>
<li><p>基于编译完成的模型运行一张真实的图片，并分析输出和模型的性能。</p></li>
<li><p>使用TVM在CPU上对模型调优。</p></li>
<li><p>基于TVM收集的调优数据再编译一个优化后的模型。</p></li>
<li><p>基于优化后的模型运行一张真实的图片，并且对比输出和性能（这里是和未优化的模型进行对比）。</p></li>
</ul>
<p>这一节的目标是向你概述TVM和TVMC的功能，并为了解TVM的工作原理奠定基础。</p>
<div class="section" id="using-tvmc">
<h2>使用TVMC<a class="headerlink" href="#using-tvmc" title="永久链接至标题">¶</a></h2>
<p>TVMC是一个Python应用程序，是TVM Python包的一部分。当你使用Python包安装TVM时，您将获得一个叫作``tvmc``的命令行应用程序。此命令的位置将根据你的平台和安装方法而各有不同。</p>
<p>或者，如果你将TVM编译为在``$PYTHONPATH`` 中的Python模块，你可以通过下面的命令来启动TVMC命令行驱动程序`python -m tvm.driver.tvmc``。</p>
<p>简单起见，本教程以``tvmc <a href="#id1"><span class="problematic" id="id2">``</span></a>的方式调用TVMC命令行，但使用``python -m tvm.driver.tvmc <a href="#id3"><span class="problematic" id="id4">``</span></a>可以获得相同的结果。</p>
<p>你可以使用以下方法查看帮助页面：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc --help
</pre></div>
</div>
<p>The main features of TVM available to <code class="docutils literal notranslate"><span class="pre">tvmc</span></code> are from subcommands
<code class="docutils literal notranslate"><span class="pre">compile</span></code>, and <code class="docutils literal notranslate"><span class="pre">run</span></code>, and <code class="docutils literal notranslate"><span class="pre">tune</span></code>.  To read about specific options under
a given subcommand, use <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">&lt;subcommand&gt;</span> <span class="pre">--help</span></code>. We will cover each of
these commands in this tutorial, but first we need to download a pre-trained
model to work with.</p>
</div>
<div class="section" id="obtaining-the-model">
<h2>获取模型<a class="headerlink" href="#obtaining-the-model" title="永久链接至标题">¶</a></h2>
<p>在本教程中，我们将使用ResNet-50 v2。ResNet-50是一个深度为50层的卷积神经网络，被用于图像分类。我们将要使用的模型已经在超过100万张具有1000种的不同分类图像上进行了预训练。该网络的输入大小是224x224。如果你对ResNet-50的结构很感兴趣，我们建议你下载 <cite>Netron &lt;https://netron.app&gt;</cite>,它是一款免费的ML模型可视化软件。</p>
<p>在本教程中，我们将使用ONNX格式的模型。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx
</pre></div>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>已支持的模型格式。</p>
<p>TVMC支持使用Keras，ONNX，TensorFlow，TFLite和Torch创建的模型。如果你需要明确提供你正在使用的模型格式，请使用选项``--model-format``。 有关更多信息，请参阅“tvmc compile --help”。</p>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>给TVM添加ONNX支持</p>
<p>TVM依赖于你系统上有可用的ONNX Python库。你可以使用命令``pip3 install --user onnx``来安装ONNX。如果你具有root权限并且想全局安装ONNX，则可以删除``--user`` 选项。  <code class="docutils literal notranslate"><span class="pre">onnxoptimizer</span></code> 依赖是可选的，仅用于 <code class="docutils literal notranslate"><span class="pre">onnx&gt;=1.9</span></code>。</p>
</div>
</div>
<div class="section" id="compiling-an-onnx-model-to-the-tvm-runtime">
<h2>编译一个ONNX模型到TVM运行时<a class="headerlink" href="#compiling-an-onnx-model-to-the-tvm-runtime" title="永久链接至标题">¶</a></h2>
<p>一旦我们下载好ResNet-50模型，下一步就是编译它。需要调用``tvmc compile``来实现它。从编译过程中获得的输出是一个将模型编译为目标平台动态库的TAR包。我们可以使用TVM运行时在目标设备上运行该模型。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc compile <span class="se">\</span>
--target <span class="s2">&quot;llvm&quot;</span> <span class="se">\</span>
--output resnet50-v2-7-tvm.tar <span class="se">\</span>
resnet50-v2-7.onnx
</pre></div>
</div>
<p>我们来看看 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">compile</span></code> 在模块中创建的文件：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>mkdir model
tar -xvf resnet50-v2-7-tvm.tar -C model
ls model
</pre></div>
</div>
<p>你将看到列出的三个文件。</p>
<ul class="simple">
<li><p><cite>mod.so`</cite> 是模型，用一个可以由TVM运行时加载的C++库来表示。</p></li>
<li><p><a href="#id1"><span class="problematic" id="id2">``</span></a>mod.json``是TVM Relay计算图的文本表示。</p></li>
<li><p><a href="#id1"><span class="problematic" id="id2">``</span></a>mod.params``是一个包含预训练模型参数的文件。</p></li>
</ul>
<p>该模块可以由你的应用程序直接加载，并且模型可以通过TVM运行时APIs来运行。</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>定义正确的目标</p>
<p>指定正确的目标（选项``--target``）会对编译后的模块的性能产生巨大的影响，因为它可以利用目标上的可用硬件特性。有关更多信息，请参阅`给X86 CPU自动调优一个卷积神经网络 &lt;<a class="reference external" href="https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_x86.html#define-network">https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_x86.html#define-network</a>&gt;`_。我们建议指明你正在运行的CPU以及可选特性，并合适地设置目标。</p>
</div>
</div>
<div class="section" id="running-the-model-from-the-compiled-module-with-tvmc">
<h2>使用TVMC运行编译好的模型<a class="headerlink" href="#running-the-model-from-the-compiled-module-with-tvmc" title="永久链接至标题">¶</a></h2>
<p>现在我们已经将模型编译到了这个模块中，我们可以使用TVM运行时来进行预测。TVMC内置了TVM运行时，允许你运行编译好的TVM模型。要使用TVMC运行模型并进行预测，我们需要做两个东西:</p>
<ul class="simple">
<li><p>我们刚刚产生的编译好的模块。</p></li>
<li><p>模型进行预测的合法输入。</p></li>
</ul>
<p>当涉及到预期的张量形状，格式和数据类型时每个模型都是特殊的。出于这个原因，大多数模型需要一些预处理和后处理，以确保输入有效并解释输出。TVMC对输入和输出数据都采用了NumPy的``.npz`` 格式。这是一种得到良好支持的NumPy格式，用于将多个数组序列化为一个文件。</p>
<p>作为本教程的输入，我们将使用猫的图片，但你可以随意替换为任何你选择的图片。</p>
<a class="reference internal image-reference" href="https://s3.amazonaws.com/model-server/inputs/kitten.jpg"><img alt="https://s3.amazonaws.com/model-server/inputs/kitten.jpg" class="align-center" src="https://s3.amazonaws.com/model-server/inputs/kitten.jpg" style="width: 224px; height: 224px;" /></a>
<div class="section" id="input-pre-processing">
<h3>输入前处理<a class="headerlink" href="#input-pre-processing" title="永久链接至标题">¶</a></h3>
<p>对于我们的ResNet-50 V2模型，输入被期望是ImageNet格式。下面是为ResNet-50 V2预处理图形的脚本示例。</p>
<p>你需要安装一个支持的Python图像库。你可以使用 <code class="docutils literal notranslate"><span class="pre">pip3</span> <span class="pre">install</span> <span class="pre">--user</span> <span class="pre">pillow</span></code> 来满足脚本的这个需求。</p>
<div class="literal-block-wrapper docutils container" id="preprocess-py">
<div class="code-block-caption"><span class="caption-text">preprocess.py</span><a class="headerlink" href="#preprocess-py" title="永久链接至代码">¶</a></div>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span> <span class="c1">#!python ./preprocess.py</span>
 <span class="kn">from</span> <span class="nn">tvm.contrib.download</span> <span class="kn">import</span> <span class="n">download_testdata</span>
 <span class="kn">from</span> <span class="nn">PIL</span> <span class="kn">import</span> <span class="n">Image</span>
 <span class="kn">import</span> <span class="nn">numpy</span> <span class="kn">as</span> <span class="nn">np</span>

 <span class="n">img_url</span> <span class="o">=</span> <span class="s2">&quot;https://s3.amazonaws.com/model-server/inputs/kitten.jpg&quot;</span>
 <span class="n">img_path</span> <span class="o">=</span> <span class="n">download_testdata</span><span class="p">(</span><span class="n">img_url</span><span class="p">,</span> <span class="s2">&quot;imagenet_cat.png&quot;</span><span class="p">,</span> <span class="n">module</span><span class="o">=</span><span class="s2">&quot;data&quot;</span><span class="p">)</span>

 <span class="c1"># Resize it to 224x224</span>
 <span class="n">resized_image</span> <span class="o">=</span> <span class="n">Image</span><span class="o">.</span><span class="n">open</span><span class="p">(</span><span class="n">img_path</span><span class="p">)</span><span class="o">.</span><span class="n">resize</span><span class="p">((</span><span class="mi">224</span><span class="p">,</span> <span class="mi">224</span><span class="p">))</span>
 <span class="n">img_data</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asarray</span><span class="p">(</span><span class="n">resized_image</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">)</span>

 <span class="c1"># ONNX expects NCHW input, so convert the array</span>
 <span class="n">img_data</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">transpose</span><span class="p">(</span><span class="n">img_data</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>

 <span class="c1"># Normalize according to ImageNet</span>
 <span class="n">imagenet_mean</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.485</span><span class="p">,</span> <span class="mf">0.456</span><span class="p">,</span> <span class="mf">0.406</span><span class="p">])</span>
 <span class="n">imagenet_stddev</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.229</span><span class="p">,</span> <span class="mf">0.224</span><span class="p">,</span> <span class="mf">0.225</span><span class="p">])</span>
 <span class="n">norm_img_data</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">img_data</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;float32&quot;</span><span class="p">)</span>
 <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">img_data</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]):</span>
         <span class="n">norm_img_data</span><span class="p">[</span><span class="n">i</span><span class="p">,</span> <span class="p">:,</span> <span class="p">:]</span> <span class="o">=</span> <span class="p">(</span><span class="n">img_data</span><span class="p">[</span><span class="n">i</span><span class="p">,</span> <span class="p">:,</span> <span class="p">:]</span> <span class="o">/</span> <span class="mi">255</span> <span class="o">-</span> <span class="n">imagenet_mean</span><span class="p">[</span><span class="n">i</span><span class="p">])</span> <span class="o">/</span> <span class="n">imagenet_stddev</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>

 <span class="c1"># Add batch dimension</span>
 <span class="n">img_data</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">expand_dims</span><span class="p">(</span><span class="n">norm_img_data</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>

 <span class="c1"># Save to .npz (outputs imagenet_cat.npz)</span>
 <span class="n">np</span><span class="o">.</span><span class="n">savez</span><span class="p">(</span><span class="s2">&quot;imagenet_cat&quot;</span><span class="p">,</span> <span class="n">data</span><span class="o">=</span><span class="n">img_data</span><span class="p">)</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="running-the-compiled-module">
<h3>运行编译好的模块<a class="headerlink" href="#running-the-compiled-module" title="永久链接至标题">¶</a></h3>
<p>有了模型和输入数据，我们现在可以运行TVMC来进行预测：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc run <span class="se">\</span>
--inputs imagenet_cat.npz <span class="se">\</span>
--output predictions.npz <span class="se">\</span>
resnet50-v2-7-tvm.tar
</pre></div>
</div>
<p>回想一下，<cite>.tar</cite> 模型文件包含一个C++库，一个Relay模型的描述和模型的参数。TVMC包括TVM运行时，它可以加载模型并根据输入进行预测。运行上述命令时，TVMC会输出一个新文件``predictions.npz``，其中包含NumPy格式的模型输出张量。</p>
<p>在此示例中，我们在用于编译的同一台机器上运行模型。在某些情况下，我们可能希望通过RPC Tracker远程运行它。要阅读有关这些选项的更多信息，请检查``tvmc run --help``。</p>
</div>
<div class="section" id="output-post-processing">
<h3>输出后处理<a class="headerlink" href="#output-post-processing" title="永久链接至标题">¶</a></h3>
<p>如前所述，每个模型都有自己特定的提供输出张量的方式。</p>
<p>在我们的例子中，我们会基于为模型提供的查找表执行一些后处理来使得ResNet-50 V2的输出呈现为更易读的形式。</p>
<p>下面的脚本显示了从我们的编译后模块的输出中提取标签的后处理示例。</p>
<div class="literal-block-wrapper docutils container" id="postprocess-py">
<div class="code-block-caption"><span class="caption-text">postprocess.py</span><a class="headerlink" href="#postprocess-py" title="永久链接至代码">¶</a></div>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="ch">#!python ./postprocess.py</span>
<span class="kn">import</span> <span class="nn">os.path</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="kn">as</span> <span class="nn">np</span>

<span class="kn">from</span> <span class="nn">scipy.special</span> <span class="kn">import</span> <span class="n">softmax</span>

<span class="kn">from</span> <span class="nn">tvm.contrib.download</span> <span class="kn">import</span> <span class="n">download_testdata</span>

<span class="c1"># Download a list of labels</span>
<span class="n">labels_url</span> <span class="o">=</span> <span class="s2">&quot;https://s3.amazonaws.com/onnx-model-zoo/synset.txt&quot;</span>
<span class="n">labels_path</span> <span class="o">=</span> <span class="n">download_testdata</span><span class="p">(</span><span class="n">labels_url</span><span class="p">,</span> <span class="s2">&quot;synset.txt&quot;</span><span class="p">,</span> <span class="n">module</span><span class="o">=</span><span class="s2">&quot;data&quot;</span><span class="p">)</span>

<span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">labels_path</span><span class="p">,</span> <span class="s2">&quot;r&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
    <span class="n">labels</span> <span class="o">=</span> <span class="p">[</span><span class="n">l</span><span class="o">.</span><span class="n">rstrip</span><span class="p">()</span> <span class="k">for</span> <span class="n">l</span> <span class="ow">in</span> <span class="n">f</span><span class="p">]</span>

<span class="n">output_file</span> <span class="o">=</span> <span class="s2">&quot;predictions.npz&quot;</span>

<span class="c1"># Open the output and read the output tensor</span>
<span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">output_file</span><span class="p">):</span>
    <span class="k">with</span> <span class="n">np</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">output_file</span><span class="p">)</span> <span class="k">as</span> <span class="n">data</span><span class="p">:</span>
        <span class="n">scores</span> <span class="o">=</span> <span class="n">softmax</span><span class="p">(</span><span class="n">data</span><span class="p">[</span><span class="s2">&quot;output_0&quot;</span><span class="p">])</span>
        <span class="n">scores</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">squeeze</span><span class="p">(</span><span class="n">scores</span><span class="p">)</span>
        <span class="n">ranks</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">argsort</span><span class="p">(</span><span class="n">scores</span><span class="p">)[::</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>

        <span class="k">for</span> <span class="n">rank</span> <span class="ow">in</span> <span class="n">ranks</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="mi">5</span><span class="p">]:</span>
            <span class="k">print</span><span class="p">(</span><span class="s2">&quot;class=&#39;</span><span class="si">%s</span><span class="s2">&#39; with probability=</span><span class="si">%f</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">labels</span><span class="p">[</span><span class="n">rank</span><span class="p">],</span> <span class="n">scores</span><span class="p">[</span><span class="n">rank</span><span class="p">]))</span>
</pre></div>
</div>
</div>
<p>运行这个脚本会产生下面的输出：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>python postprocess.py

<span class="c1"># class=&#39;n02123045 tabby, tabby cat&#39; with probability=0.610553</span>
<span class="c1"># class=&#39;n02123159 tiger cat&#39; with probability=0.367179</span>
<span class="c1"># class=&#39;n02124075 Egyptian cat&#39; with probability=0.019365</span>
<span class="c1"># class=&#39;n02129604 tiger, Panthera tigris&#39; with probability=0.001273</span>
<span class="c1"># class=&#39;n04040759 radiator&#39; with probability=0.000261</span>
</pre></div>
</div>
<p>尝试用其它图形替换猫的图像，看看ResNet模型会做出什么样的预测。</p>
</div>
</div>
<div class="section" id="automatically-tuning-the-resnet-model">
<h2>自动调整ResNet模型<a class="headerlink" href="#automatically-tuning-the-resnet-model" title="永久链接至标题">¶</a></h2>
<p>之前的模型被编译为在TVM运行时工作，但不包括任何特定平台的优化。在本节中，我们将向你展示如何使用TVMC构建基于你工作平台的优化模型。</p>
<p>在某些情况下，使用我们编译的模块进行推理时，我们可能无法获得预期的性能。在这种情况下，我们可以利用自动调优器为我们的模型找到更好的配置并提升性能。TVM中的调优是指在给定目标上优化模型使其运行更快的过程。这与训练和微调的不同之处在于，它不会影响模型的准确率，而只会影响运行时性能。作为调优过程的一部分，TVM将尝试运行许多不同的算子实现变体来看看哪个性能最佳。这些运行结果存储在调优记录文件中，该文件最终是 <code class="docutils literal notranslate"><span class="pre">tune</span></code> 子命令的输出。</p>
<p>以最简形式，调优需要你提供三个东西：</p>
<ul class="simple">
<li><p>你打算将这个模型运行在哪个目标设备上</p></li>
<li><p>最后调优文件将被存储的路径</p></li>
<li><p>要调整的模型的路径</p></li>
</ul>
<p>下面的例子演示了它实际上是怎么工作的：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc tune <span class="se">\</span>
--target <span class="s2">&quot;llvm&quot;</span> <span class="se">\</span>
--output resnet50-v2-7-autotuner_records.json <span class="se">\</span>
resnet50-v2-7.onnx
</pre></div>
</div>
<p>在这个例子中，如果你为 <code class="docutils literal notranslate"><span class="pre">--target</span></code> 标志指定一个具体的目标，你会看到更好的结果。比如，在Intel i7处理器上，你可以使用 <cite>--target llvm -mcpu=skylake</cite>。对于此调优示例，我们将LLVM作为指定架构的编译器在CPU上进行本地调优。</p>
<p>TVMC将针对模型的参数空间执行搜索，为算子尝试不同的配置并选择在你的平台上运行最快的配置。虽然这是基于CPU和模型算子的启发式搜索，但仍可能需要几个小时才能完成搜索。此搜索的输出将保存到 <code class="docutils literal notranslate"><span class="pre">resnet50-v2-7-autotuner_records.json</span></code> 文件，稍后将用于编译一个优化后的模型。</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>定义调优搜索算法</p>
<p>默认情况下，此搜索使用 <code class="docutils literal notranslate"><span class="pre">XGBoost</span> <span class="pre">Grid</span></code> 算法。根据你的模型的复杂性和可用时间，你可能需要选择不同的算法。完整列表可以通过 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">tune</span> <span class="pre">--help</span></code> 获得。</p>
</div>
<p>对于消费级的Skylake CPU，输出将如下所示：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc tune   --target <span class="s2">&quot;llvm -mcpu=broadwell&quot;</span>   --output resnet50-v2-7-autotuner_records.json   resnet50-v2-7.onnx
<span class="c1"># [Task  1/24]  Current/Best:    9.65/  23.16 GFLOPS | Progress: (60/1000) | 130.74 s Done.</span>
<span class="c1"># [Task  1/24]  Current/Best:    3.56/  23.16 GFLOPS | Progress: (192/1000) | 381.32 s Done.</span>
<span class="c1"># [Task  2/24]  Current/Best:   13.13/  58.61 GFLOPS | Progress: (960/1000) | 1190.59 s Done.</span>
<span class="c1"># [Task  3/24]  Current/Best:   31.93/  59.52 GFLOPS | Progress: (800/1000) | 727.85 s Done.</span>
<span class="c1"># [Task  4/24]  Current/Best:   16.42/  57.80 GFLOPS | Progress: (960/1000) | 559.74 s Done.</span>
<span class="c1"># [Task  5/24]  Current/Best:   12.42/  57.92 GFLOPS | Progress: (800/1000) | 766.63 s Done.</span>
<span class="c1"># [Task  6/24]  Current/Best:   20.66/  59.25 GFLOPS | Progress: (1000/1000) | 673.61 s Done.</span>
<span class="c1"># [Task  7/24]  Current/Best:   15.48/  59.60 GFLOPS | Progress: (1000/1000) | 953.04 s Done.</span>
<span class="c1"># [Task  8/24]  Current/Best:   31.97/  59.33 GFLOPS | Progress: (972/1000) | 559.57 s Done.</span>
<span class="c1"># [Task  9/24]  Current/Best:   34.14/  60.09 GFLOPS | Progress: (1000/1000) | 479.32 s Done.</span>
<span class="c1"># [Task 10/24]  Current/Best:   12.53/  58.97 GFLOPS | Progress: (972/1000) | 642.34 s Done.</span>
<span class="c1"># [Task 11/24]  Current/Best:   30.94/  58.47 GFLOPS | Progress: (1000/1000) | 648.26 s Done.</span>
<span class="c1"># [Task 12/24]  Current/Best:   23.66/  58.63 GFLOPS | Progress: (1000/1000) | 851.59 s Done.</span>
<span class="c1"># [Task 13/24]  Current/Best:   25.44/  59.76 GFLOPS | Progress: (1000/1000) | 534.58 s Done.</span>
<span class="c1"># [Task 14/24]  Current/Best:   26.83/  58.51 GFLOPS | Progress: (1000/1000) | 491.67 s Done.</span>
<span class="c1"># [Task 15/24]  Current/Best:   33.64/  58.55 GFLOPS | Progress: (1000/1000) | 529.85 s Done.</span>
<span class="c1"># [Task 16/24]  Current/Best:   14.93/  57.94 GFLOPS | Progress: (1000/1000) | 645.55 s Done.</span>
<span class="c1"># [Task 17/24]  Current/Best:   28.70/  58.19 GFLOPS | Progress: (1000/1000) | 756.88 s Done.</span>
<span class="c1"># [Task 18/24]  Current/Best:   19.01/  60.43 GFLOPS | Progress: (980/1000) | 514.69 s Done.</span>
<span class="c1"># [Task 19/24]  Current/Best:   14.61/  57.30 GFLOPS | Progress: (1000/1000) | 614.44 s Done.</span>
<span class="c1"># [Task 20/24]  Current/Best:   10.47/  57.68 GFLOPS | Progress: (980/1000) | 479.80 s Done.</span>
<span class="c1"># [Task 21/24]  Current/Best:   34.37/  58.28 GFLOPS | Progress: (308/1000) | 225.37 s Done.</span>
<span class="c1"># [Task 22/24]  Current/Best:   15.75/  57.71 GFLOPS | Progress: (1000/1000) | 1024.05 s Done.</span>
<span class="c1"># [Task 23/24]  Current/Best:   23.23/  58.92 GFLOPS | Progress: (1000/1000) | 999.34 s Done.</span>
<span class="c1"># [Task 24/24]  Current/Best:   17.27/  55.25 GFLOPS | Progress: (1000/1000) | 1428.74 s Done.</span>
</pre></div>
</div>
<p>调优会话可能需要很长时间，因此 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">tune</span></code> 提供了许多选项来自定义你的调优过程，如搜索的重复次数（例如 <code class="docutils literal notranslate"><span class="pre">--repeat</span></code> 和 <code class="docutils literal notranslate"><span class="pre">--number</span></code>），要使用的调优算法等等。检查 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">tune</span> <span class="pre">--help</span></code> 获得更多信息。</p>
</div>
<div class="section" id="compiling-an-optimized-model-with-tuning-data">
<h2>使用调优数据编译一个优化后的模型<a class="headerlink" href="#compiling-an-optimized-model-with-tuning-data" title="永久链接至标题">¶</a></h2>
<p>作为上述调优过程的输出，我们获得了存储在 <code class="docutils literal notranslate"><span class="pre">resnet50-v2-7-autotuner_records.json</span></code> 文件中的调优记录。该文件可以通过两种方式使用：</p>
<ul class="simple">
<li><p>作为进一步调优的输入（通过 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">tune</span> <span class="pre">--tuning-records</span></code>）</p></li>
<li><p>作为编译器的输入</p></li>
</ul>
<p>编译器将使用调优结果为指定目标上的模型生成高性能代码。为此，我们可以用 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">compile</span> <span class="pre">--tuning-records</span></code>。 检查 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">compile</span> <span class="pre">--help</span></code> 以获取更多信息。</p>
<p>现在我们已经收集了模型的调优数据，我们可以使用优化后的算子来重新编译模型以加快运算速度。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc compile <span class="se">\</span>
--target <span class="s2">&quot;llvm&quot;</span> <span class="se">\</span>
--tuning-records resnet50-v2-7-autotuner_records.json  <span class="se">\</span>
--output resnet50-v2-7-tvm_autotuned.tar <span class="se">\</span>
resnet50-v2-7.onnx
</pre></div>
</div>
<p>验证优化后模型运行并产生相同的结果：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc run <span class="se">\</span>
--inputs imagenet_cat.npz <span class="se">\</span>
--output predictions.npz <span class="se">\</span>
resnet50-v2-7-tvm_autotuned.tar

python postprocess.py
</pre></div>
</div>
<p>验证预测结果是相同的：</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span><span class="c1"># class=&#39;n02123045 tabby, tabby cat&#39; with probability=0.610550</span>
<span class="c1"># class=&#39;n02123159 tiger cat&#39; with probability=0.367181</span>
<span class="c1"># class=&#39;n02124075 Egyptian cat&#39; with probability=0.019365</span>
<span class="c1"># class=&#39;n02129604 tiger, Panthera tigris&#39; with probability=0.001273</span>
<span class="c1"># class=&#39;n04040759 radiator&#39; with probability=0.000261</span>
</pre></div>
</div>
</div>
<div class="section" id="comparing-the-tuned-and-untuned-models">
<h2>比较调优和未调优的模型<a class="headerlink" href="#comparing-the-tuned-and-untuned-models" title="永久链接至标题">¶</a></h2>
<p>TVMC为你提供了在模型之间进行基本性能基准测试的工具。你可以指定重复次数然后TVMC会报告模型的运行时间（独立于运行时启动）。我们可以大概了解调优对模型性能的改进程度。例如，在Intel i7的测试中，我们看到调优后的模型运行速度比调优前快47%。</p>
<div class="highlight-bash notranslate"><div class="highlight"><pre><span></span>tvmc run <span class="se">\</span>
--inputs imagenet_cat.npz <span class="se">\</span>
--output predictions.npz  <span class="se">\</span>
--print-time <span class="se">\</span>
--repeat <span class="m">100</span> <span class="se">\</span>
resnet50-v2-7-tvm_autotuned.tar

<span class="c1"># Execution time summary:</span>
<span class="c1"># mean (ms)   max (ms)    min (ms)    std (ms)</span>
<span class="c1">#     92.19     115.73       89.85        3.15</span>

tvmc run <span class="se">\</span>
--inputs imagenet_cat.npz <span class="se">\</span>
--output predictions.npz  <span class="se">\</span>
--print-time <span class="se">\</span>
--repeat <span class="m">100</span> <span class="se">\</span>
resnet50-v2-7-tvm.tar

<span class="c1"># Execution time summary:</span>
<span class="c1"># mean (ms)   max (ms)    min (ms)    std (ms)</span>
<span class="c1">#    193.32     219.97      185.04        7.11</span>
</pre></div>
</div>
</div>
<div class="section" id="final-remarks">
<h2>结语<a class="headerlink" href="#final-remarks" title="永久链接至标题">¶</a></h2>
<p>在本教程中，我们介绍了 TVMC，这是 TVM 的命令行驱动程序。 我们演示了如何编译、运行和调优模型。 我们还讨论了对输入和输出进行预处理和后处理的必要性。 在调优过程之后，我们演示了如何比较未优化和优化模型的性能。</p>
<p>这里我们展示了一个在本地使用 ResNet-50 V2 的简单示例。 但是，TVMC 支持更多功能，包括交叉编译、远程执行和分析/基准测试。</p>
<p>要查看其他可用选项，请查看 <code class="docutils literal notranslate"><span class="pre">tvmc</span> <span class="pre">--help</span></code>。</p>
<p>在下一个教程中, <a class="reference external" href="autotvm_relay_x86.html">使用 Python 接口编译和优化模型</a>, 我们将使用 Python 接口实现相同的编译和优化步骤。</p>
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-tutorial-tvmc-command-line-driver-py">
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../_downloads/233ceda3a682ae5df93b4ce0bcfbf870/tvmc_command_line_driver.py"><code class="xref download docutils literal notranslate"><span class="pre">下载</span> <span class="pre">Python</span> <span class="pre">源码:</span> <span class="pre">tvmc_command_line_driver.py</span></code></a></p>
</div>
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../_downloads/efe0b02e219b28e0bd85fbdda35ba8ac/tvmc_command_line_driver.ipynb"><code class="xref download docutils literal notranslate"><span class="pre">下载</span> <span class="pre">Jupyter</span> <span class="pre">notebook:</span> <span class="pre">tvmc_command_line_driver.ipynb</span></code></a></p>
</div>
</div>
<p class="sphx-glr-signature"><a class="reference external" href="https://sphinx-gallery.github.io">Gallery generated by Sphinx-Gallery</a></p>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="autotvm_relay_x86.html" class="btn btn-neutral float-right" title="Compiling and Optimizing a Model with the Python Interface (AutoTVM)" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="install.html" class="btn btn-neutral float-left" title="安装 TVM" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../_static/img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>