





<!DOCTYPE html>
<html class="writer-html5" lang="zh-CN" >
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>在CPU上部署Hugging Face Pruned模型 &mdash; tvm 0.8.dev1982 文档</title>
  

  
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
  <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/gallery.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/css/tlcpack_theme.css" type="text/css" />

  
  
    <link rel="shortcut icon" href="../../_static/tvm-logo-square.png"/>
  

  
  
  
  
    
      <script id="documentation_options" data-url_root="../../" src="../../_static/documentation_options.js"></script>
        <script src="../../_static/jquery.js"></script>
        <script src="../../_static/underscore.js"></script>
        <script src="../../_static/doctools.js"></script>
        <script src="../../_static/translations.js"></script>
    
    <script type="text/javascript" src="../../_static/js/theme.js"></script>

    
    <script type="text/javascript" src="../../_static/js/tlcpack_theme.js"></script>
    <link rel="index" title="索引" href="../../genindex.html" />
    <link rel="search" title="搜索" href="../../search.html" />
    <link rel="next" title="部署Single Shot Multibox Detector(SSD)模型" href="deploy_ssd_gluoncv.html" />
    <link rel="prev" title="在Cuda上部署一个量化模型" href="deploy_quantized.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    
<header class="header">
    <div class="innercontainer">
      <div class="headerInner d-flex justify-content-between align-items-center">
          <div class="headerLogo">
               <a href="https://tvm.apache.org/"><img src="https://tvm.apache.org/assets/images/logo.svg" alt="logo"></a>
          </div>

          <div id="headMenu" class="headerNav">
            <button type="button" id="closeHeadMenu" class="navCloseBtn"><img src="../../_static/img/close-icon.svg" alt="Close"></button>
             <ul class="nav">
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/community">Community</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/download">Download</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/vta">VTA</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/blog">Blog</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvm.apache.org/docs">Docs</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmconf.org">Conference</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://github.com/apache/tvm/">Github</a>
                </li>
                <li class="nav-item">
                   <a class="nav-link" href="https://tvmchinese.github.io/declaration_zh_CN.html">About-Translators</a>
                </li>
             </ul>
               <div class="responsivetlcdropdown">
                 <button type="button" class="btn-link">
                   ASF
                 </button>
                 <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                 </ul>
               </div>
          </div>
            <div class="responsiveMenuIcon">
              <button type="button" id="menuBtn" class="btn-menu"><img src="../../_static/img/menu-icon.svg" alt="Menu Icon"></button>
            </div>

            <div class="tlcDropdown">
              <div class="dropdown">
                <button type="button" class="btn-link dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                  ASF
                </button>
                <div class="dropdown-menu dropdown-menu-right">
                  <ul>
                     <li>
                       <a href="https://apache.org/">Apache Homepage</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/licenses/">License</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/security/">Security</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
                     </li>
                     <li>
                       <a href="https://www.apache.org/events/current-event">Events</a>
                     </li>
                     <li>
                       <a href="https://www.zhihu.com/column/c_1429578595417563136">Zhihu</a>
                     </li>
                  </ul>
                </div>
              </div>
          </div>
       </div>
    </div>
 </header>
 
    <nav data-toggle="wy-nav-shift" class="wy-nav-side fixed">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../index.html">
          

          
            
            <img src="../../_static/tvm-logo-small.png" class="logo" alt="Logo"/>
          
          </a>

          
            
            
                <div class="version">
                  0.8.dev1982
                </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" aria-label="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption" role="heading"><span class="caption-text">如何开始</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../install/index.html">安装 TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../contribute/index.html">贡献者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">用户引导</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../../tutorial/index.html">User Tutorial</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">How To Guides</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../compile_models/index.html">编译深度学习模型</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="../deploy/index.html">TVM 部署模型和集成</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="../deploy/index.html#build-the-tvm-runtime-library">构建 TVM 运行 runtime 库</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deploy/index.html#cross-compile-the-tvm-runtime-for-other-architectures">为其它架构交叉编译TVM runtime</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deploy/index.html#optimize-and-tune-models-for-target-devices">针对目标设备优化和调整模型</a></li>
<li class="toctree-l3"><a class="reference internal" href="../deploy/index.html#deploy-optimized-model-on-target-devices">在目标设备上部署优化的模型</a></li>
<li class="toctree-l3 current"><a class="reference internal" href="../deploy/index.html#additional-deployment-how-tos">其他部署方式</a><ul class="current">
<li class="toctree-l4 current"><a class="reference internal" href="index.html">Deploy Deep Learning Models</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../work_with_relay/index.html">Work With Relay</a></li>
<li class="toctree-l2"><a class="reference internal" href="../work_with_schedules/index.html">Work With Tensor Expression and Schedules</a></li>
<li class="toctree-l2"><a class="reference internal" href="../optimize_operators/index.html">优化张量算子</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tune_with_autotvm/index.html">Auto-Tune with Templates and AutoTVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../tune_with_autoscheduler/index.html">Use AutoScheduler for Template-Free Scheduling</a></li>
<li class="toctree-l2"><a class="reference internal" href="../work_with_microtvm/index.html">Work With microTVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../extend_tvm/index.html">Extend TVM</a></li>
<li class="toctree-l2"><a class="reference internal" href="../profile/index.html">Profile Models</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../errors.html">Handle TVM Errors</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../faq.html">常见提问</a></li>
</ul>
</li>
</ul>
<p class="caption" role="heading"><span class="caption-text">开发者引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../dev/tutorial/index.html">Developer Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../dev/how_to/how_to.html">开发者指南</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">架构指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../arch/index.html">Design and Architecture</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">主题引导</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../topic/microtvm/index.html">microTVM：裸机使用TVM</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../topic/vta/index.html">VTA: Versatile Tensor Accelerator</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">参考指南</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../reference/langref/index.html">语言参考</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/api/python/index.html">Python API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/api/links.html">Other APIs</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../reference/publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../genindex.html">索引</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      
      <nav class="wy-nav-top" aria-label="top navigation" data-toggle="wy-nav-top">
        
            <div class="togglemenu">

            </div>
            <div class="nav-content">
              <!-- tvm -->
              Table of content
            </div>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        

          




















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../index.html">Docs</a> <span class="br-arrow">></span></li>
        
          <li><a href="../index.html">How To Guides</a> <span class="br-arrow">></span></li>
        
          <li><a href="../deploy/index.html">TVM 部署模型和集成</a> <span class="br-arrow">></span></li>
        
          <li><a href="index.html">Deploy Deep Learning Models</a> <span class="br-arrow">></span></li>
        
      <li>在CPU上部署Hugging Face Pruned模型</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
            <a href="../../_sources/how_to/deploy_models/deploy_sparse.rst.txt" rel="nofollow"> <img src="../../_static/img/source.svg" alt="viewsource"/></a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="sphx-glr-download-link-note admonition note">
<p class="admonition-title">注解</p>
<p>Click <a class="reference internal" href="#sphx-glr-download-how-to-deploy-models-deploy-sparse-py"><span class="std std-ref">here</span></a> to download the full example code</p>
</div>
<div class="sphx-glr-example-title section" id="deploy-a-hugging-face-pruned-model-on-cpu">
<span id="sphx-glr-how-to-deploy-models-deploy-sparse-py"></span><h1>在CPU上部署Hugging Face Pruned模型<a class="headerlink" href="#deploy-a-hugging-face-pruned-model-on-cpu" title="永久链接至标题">¶</a></h1>
<p><strong>作者</strong>: <a class="reference external" href="https://github.com/jwfromm">Josh Fromm</a></p>
<p>This tutorial demonstrates how to take any pruned model, in this case <a class="reference external" href="https://huggingface.co/huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad">PruneBert
from Hugging Face</a>,
and use TVM to leverage the model’s sparsity support to produce real speedups. Although
the primary purpose of this tutorial is to realize speedups on already pruned
models, it may also be useful to estimate how fast a model would be <em>if</em> it were
pruned. To this end, we also provide a function that takes an unpruned model and
replaces its weights
with random and pruned weights at a specified sparsity. This may be a useful
feature when trying to decide if a model is worth pruning or not.</p>
<p>Before we get into the code, it’s useful to discuss sparsity and pruning
and dig into the two
different types of sparsity: <strong>structured</strong> and <strong>unstructured</strong>.</p>
<p>Pruning is a technique primarily used to reduce the parameter size of a model
by replacing weight values with 0s. Although many methods exist for choosing which
weights should be set to 0, the most straightforward is by picking the
weights with the smallest value. Typically, weights are pruned to a desired
sparsity percentage. For example, a 95% sparse model would have only 5% of
its weights non-zero. Pruning to very high sparsities often requires
finetuning or full retraining as it tends to be a lossy approximation.
Although parameter size benefits are quite easy to obtain from a pruned model
through simple compression, leveraging sparsity to yield runtime speedups
is more complicated.</p>
<p>In structured sparsity weights are pruned with the goal of clustering
pruned weights together. In other words, they are pruned using both their
value and location. The benefit of bunching up pruned weights is that it allows
an algorithm such as matrix multiplication to skip entire blocks. It turns out
that some degree of <em>block sparsity</em> is very important to realizing significant
speedups on most hardware available today.
This is because when loading memory in most CPUs or GPUs,
it doesn’t save any work to skip reading a single value at a time, instead an entire
chunk or tile is read in and executed using something like vectorized instructions.</p>
<p>Unstructured sparse weights are those that are pruned only on the value of
the original weights. They may appear to be scattered randomly throughout
a tensor rather than in chunks like we’d see in block sparse weights.
At low sparsities, unstructured pruning techniques are difficult to
accelerate. However, at high sparsities many blocks of all 0 values
will naturally appear, making it possible to accelerate.</p>
<p>This tutorial interacts with both structured and unstructured sparsity.
Hugging Face’s PruneBert model is unstructured but 95% sparse, allowing us
to apply TVM’s block sparse optimizations to it, even if not optimally.
When generating random sparse weights for an unpruned model, we do so with structured
sparsity. A fun exercise is comparing the real speed of PruneBert with the block
sparse speed using fake weights to see the benefit of structured sparsity.</p>
<div class="section" id="load-required-modules">
<h2>加载所需的模块<a class="headerlink" href="#load-required-modules" title="永久链接至标题">¶</a></h2>
<p>除TVM、scipy外，还需要最新的transformers和tensorflow 2.2+。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">tvm</span>
<span class="kn">import</span> <span class="nn">time</span>
<span class="kn">import</span> <span class="nn">itertools</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">import</span> <span class="nn">tensorflow</span> <span class="k">as</span> <span class="nn">tf</span>
<span class="kn">from</span> <span class="nn">tvm</span> <span class="k">import</span> <span class="n">relay</span><span class="p">,</span> <span class="n">runtime</span>
<span class="kn">from</span> <span class="nn">tvm.contrib</span> <span class="k">import</span> <span class="n">graph_executor</span>
<span class="kn">from</span> <span class="nn">tvm.relay</span> <span class="k">import</span> <span class="n">data_dep_optimization</span> <span class="k">as</span> <span class="n">ddo</span>
<span class="kn">from</span> <span class="nn">tensorflow.python.framework.convert_to_constants</span> <span class="k">import</span> <span class="p">(</span>
    <span class="n">convert_variables_to_constants_v2</span><span class="p">,</span>
<span class="p">)</span>
<span class="kn">import</span> <span class="nn">scipy.sparse</span> <span class="k">as</span> <span class="nn">sp</span>


<span class="c1"># Ask tensorflow to limit its GPU memory to what&#39;s actually needed</span>
<span class="c1"># instead of gobbling everything that&#39;s available.</span>
<span class="c1"># https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth</span>
<span class="c1"># This way this tutorial is a little more friendly to sphinx-gallery.</span>
<span class="n">gpus</span> <span class="o">=</span> <span class="n">tf</span><span class="o">.</span><span class="n">config</span><span class="o">.</span><span class="n">list_physical_devices</span><span class="p">(</span><span class="s2">&quot;GPU&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="n">gpus</span><span class="p">:</span>
    <span class="k">try</span><span class="p">:</span>
        <span class="k">for</span> <span class="n">gpu</span> <span class="ow">in</span> <span class="n">gpus</span><span class="p">:</span>
            <span class="n">tf</span><span class="o">.</span><span class="n">config</span><span class="o">.</span><span class="n">experimental</span><span class="o">.</span><span class="n">set_memory_growth</span><span class="p">(</span><span class="n">gpu</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;tensorflow will use experimental.set_memory_growth(True)&quot;</span><span class="p">)</span>
    <span class="k">except</span> <span class="ne">RuntimeError</span> <span class="k">as</span> <span class="n">e</span><span class="p">:</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;experimental.set_memory_growth option is not available: </span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">e</span><span class="p">))</span>
</pre></div>
</div>
<p class="sphx-glr-script-out">输出:</p>
<div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>tensorflow will use experimental.set_memory_growth(True)
</pre></div>
</div>
</div>
<div class="section" id="configure-settings">
<h2>配置设置<a class="headerlink" href="#configure-settings" title="永久链接至标题">¶</a></h2>
<p>让我们从定义一些参数开始，这些参数定义了要运行的模型类型和稀疏度。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># The name of the transformer model to download and run.</span>
<span class="n">name</span> <span class="o">=</span> <span class="s2">&quot;huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad&quot;</span>
<span class="c1"># The number of batches in an input.</span>
<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">1</span>
<span class="c1"># The length of each input sequence.</span>
<span class="n">seq_len</span> <span class="o">=</span> <span class="mi">128</span>
<span class="c1"># TVM platform identifier. Note that best cpu performance can be achieved by setting -mcpu</span>
<span class="c1"># appropriately for your specific machine. CUDA and ROCm are also supported.</span>
<span class="n">target</span> <span class="o">=</span> <span class="s2">&quot;llvm&quot;</span>
<span class="c1"># Which device to run on. Should be one of tvm.cpu() or tvm.cuda().</span>
<span class="n">dev</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span>
<span class="c1"># If true, then a sparse variant of the network will be run and</span>
<span class="c1"># benchmarked.</span>
<span class="n">measure_sparse</span> <span class="o">=</span> <span class="kc">True</span>
<span class="c1"># The block size of structured sparsity to convert weight tensors</span>
<span class="c1"># into. Changing this parameter may yield speedups for some platforms.</span>
<span class="n">bs_r</span> <span class="o">=</span> <span class="mi">1</span>
<span class="c1"># For models besides PruneBert (which is 95% sparse), this parameter</span>
<span class="c1"># determines how sparse the generated weights should be. The higher</span>
<span class="c1"># the sparsity, the faster the result.</span>
<span class="n">sparsity</span> <span class="o">=</span> <span class="mf">0.85</span>
</pre></div>
</div>
</div>
<div class="section" id="download-and-convert-transformers-model">
<h2>下载并转换Transformers Model<a class="headerlink" href="#download-and-convert-transformers-model" title="永久链接至标题">¶</a></h2>
<p>Now we’ll grab a model from the transformers module, download it,
convert it into a TensorFlow graphdef in preparation for converting that graphdef into
a relay graph that we can optimize and deploy.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">load_keras_model</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">name</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">report_runtime</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
    <span class="n">model</span> <span class="o">=</span> <span class="n">module</span><span class="o">.</span><span class="n">from_pretrained</span><span class="p">(</span><span class="n">name</span><span class="p">)</span>
    <span class="n">dummy_input</span> <span class="o">=</span> <span class="n">tf</span><span class="o">.</span><span class="n">keras</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">seq_len</span><span class="p">],</span> <span class="n">batch_size</span><span class="o">=</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="s2">&quot;int32&quot;</span><span class="p">)</span>
    <span class="n">dummy_out</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">dummy_input</span><span class="p">)</span>  <span class="c1"># Propagate shapes through the keras model.</span>
    <span class="k">if</span> <span class="n">report_runtime</span><span class="p">:</span>
        <span class="n">np_input</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">uniform</span><span class="p">(</span><span class="n">size</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">],</span> <span class="n">low</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">high</span><span class="o">=</span><span class="n">seq_len</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span>
            <span class="s2">&quot;int32&quot;</span>
        <span class="p">)</span>
        <span class="n">start</span> <span class="o">=</span> <span class="n">time</span><span class="o">.</span><span class="n">time</span><span class="p">()</span>
        <span class="n">repeats</span> <span class="o">=</span> <span class="mi">50</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">repeats</span><span class="p">):</span>
            <span class="n">np_out</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">np_input</span><span class="p">)</span>
        <span class="n">end</span> <span class="o">=</span> <span class="n">time</span><span class="o">.</span><span class="n">time</span><span class="p">()</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Keras Runtime: </span><span class="si">%f</span><span class="s2"> ms.&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="mi">1000</span> <span class="o">*</span> <span class="p">((</span><span class="n">end</span> <span class="o">-</span> <span class="n">start</span><span class="p">)</span> <span class="o">/</span> <span class="n">repeats</span><span class="p">)))</span>
    <span class="k">return</span> <span class="n">model</span>


<span class="k">def</span> <span class="nf">convert_to_graphdef</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">):</span>
    <span class="n">model_func</span> <span class="o">=</span> <span class="n">tf</span><span class="o">.</span><span class="n">function</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">model</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
    <span class="n">input_dict</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">_saved_model_inputs_spec</span>
    <span class="n">input_spec</span> <span class="o">=</span> <span class="n">input_dict</span><span class="p">[</span><span class="nb">list</span><span class="p">(</span><span class="n">input_dict</span><span class="o">.</span><span class="n">keys</span><span class="p">())[</span><span class="mi">0</span><span class="p">]]</span>
    <span class="n">model_func</span> <span class="o">=</span> <span class="n">model_func</span><span class="o">.</span><span class="n">get_concrete_function</span><span class="p">(</span>
        <span class="n">tf</span><span class="o">.</span><span class="n">TensorSpec</span><span class="p">([</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">],</span> <span class="n">input_spec</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
    <span class="p">)</span>
    <span class="n">frozen_func</span> <span class="o">=</span> <span class="n">convert_variables_to_constants_v2</span><span class="p">(</span><span class="n">model_func</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">frozen_func</span><span class="o">.</span><span class="n">graph</span><span class="o">.</span><span class="n">as_graph_def</span><span class="p">()</span>


<span class="k">def</span> <span class="nf">download_model</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">):</span>
    <span class="kn">import</span> <span class="nn">transformers</span>

    <span class="n">module</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">transformers</span><span class="p">,</span> <span class="s2">&quot;TFBertForSequenceClassification&quot;</span><span class="p">)</span>
    <span class="n">model</span> <span class="o">=</span> <span class="n">load_keras_model</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="n">name</span><span class="p">,</span> <span class="n">batch_size</span><span class="o">=</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="o">=</span><span class="n">seq_len</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">convert_to_graphdef</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="convert-to-relay-graph">
<h2>转换为Relay Graph<a class="headerlink" href="#convert-to-relay-graph" title="永久链接至标题">¶</a></h2>
<p>We now have all the tooling to get a transformers model in the right format
for relay conversion. Let’s import it! In the following function we
save the imported graph in relay’s json format so that we don’t have
to reimport from tensorflow each time this script is run.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">import_graphdef</span><span class="p">(</span>
    <span class="n">name</span><span class="p">,</span>
    <span class="n">batch_size</span><span class="p">,</span>
    <span class="n">seq_len</span><span class="p">,</span>
    <span class="n">save_relay</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
    <span class="n">relay_file</span><span class="o">=</span><span class="s2">&quot;model.json&quot;</span><span class="p">,</span>
    <span class="n">relay_params</span><span class="o">=</span><span class="s2">&quot;model.params&quot;</span><span class="p">,</span>
<span class="p">):</span>
    <span class="n">abs_path</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">dirname</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">abspath</span><span class="p">(</span><span class="vm">__file__</span><span class="p">))</span>
    <span class="n">shape_dict</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;input_1&quot;</span><span class="p">:</span> <span class="p">(</span><span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">)}</span>
    <span class="n">relay_file</span> <span class="o">=</span> <span class="p">(</span><span class="s2">&quot;</span><span class="si">%s</span><span class="s2">_</span><span class="si">%d</span><span class="s2">_</span><span class="si">%d</span><span class="s2">_</span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">,</span> <span class="n">relay_file</span><span class="p">))</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">&quot;/&quot;</span><span class="p">,</span> <span class="s2">&quot;_&quot;</span><span class="p">)</span>
    <span class="n">relay_params</span> <span class="o">=</span> <span class="p">(</span><span class="s2">&quot;</span><span class="si">%s</span><span class="s2">_</span><span class="si">%d</span><span class="s2">_</span><span class="si">%d</span><span class="s2">_</span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">,</span> <span class="n">relay_params</span><span class="p">))</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">&quot;/&quot;</span><span class="p">,</span> <span class="s2">&quot;_&quot;</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">abs_path</span><span class="p">,</span> <span class="n">relay_file</span><span class="p">))</span> <span class="ow">and</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span>
        <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">abs_path</span><span class="p">,</span> <span class="n">relay_params</span><span class="p">)</span>
    <span class="p">):</span>
        <span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">abs_path</span><span class="p">,</span> <span class="n">relay_file</span><span class="p">),</span> <span class="s2">&quot;r&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">fi</span><span class="p">:</span>
            <span class="n">mod</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">ir</span><span class="o">.</span><span class="n">load_json</span><span class="p">(</span><span class="n">fi</span><span class="o">.</span><span class="n">read</span><span class="p">())</span>
        <span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">abs_path</span><span class="p">,</span> <span class="n">relay_params</span><span class="p">),</span> <span class="s2">&quot;rb&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">fi</span><span class="p">:</span>
            <span class="n">params</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">load_param_dict</span><span class="p">(</span><span class="n">fi</span><span class="o">.</span><span class="n">read</span><span class="p">())</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="n">graph_def</span> <span class="o">=</span> <span class="n">download_model</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">)</span>

        <span class="n">mod</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">frontend</span><span class="o">.</span><span class="n">from_tensorflow</span><span class="p">(</span><span class="n">graph_def</span><span class="p">,</span> <span class="n">shape</span><span class="o">=</span><span class="n">shape_dict</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">save_relay</span><span class="p">:</span>
            <span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">abs_path</span><span class="p">,</span> <span class="n">relay_file</span><span class="p">),</span> <span class="s2">&quot;w&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">fo</span><span class="p">:</span>
                <span class="n">fo</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="n">tvm</span><span class="o">.</span><span class="n">ir</span><span class="o">.</span><span class="n">save_json</span><span class="p">(</span><span class="n">mod</span><span class="p">))</span>
            <span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">abs_path</span><span class="p">,</span> <span class="n">relay_params</span><span class="p">),</span> <span class="s2">&quot;wb&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">fo</span><span class="p">:</span>
                <span class="n">fo</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="n">runtime</span><span class="o">.</span><span class="n">save_param_dict</span><span class="p">(</span><span class="n">params</span><span class="p">))</span>

    <span class="k">return</span> <span class="n">mod</span><span class="p">,</span> <span class="nb">dict</span><span class="p">(</span><span class="n">params</span><span class="o">.</span><span class="n">items</span><span class="p">()),</span> <span class="n">shape_dict</span>
</pre></div>
</div>
</div>
<div class="section" id="run-the-dense-graph">
<h2>运行稠密图<a class="headerlink" href="#run-the-dense-graph" title="永久链接至标题">¶</a></h2>
<p>Let’s run the default version of the imported model. Note that even if
the weights are sparse, we won’t see any speedup because we are using
regular dense matrix multiplications on these dense (but mostly zero)
tensors instead of sparse aware kernels.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">run_relay_graph</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">):</span>
    <span class="k">with</span> <span class="n">relay</span><span class="o">.</span><span class="n">build_config</span><span class="p">(</span><span class="n">opt_level</span><span class="o">=</span><span class="mi">3</span><span class="p">):</span>
        <span class="n">lib</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">target</span><span class="o">=</span><span class="n">target</span><span class="p">,</span> <span class="n">params</span><span class="o">=</span><span class="n">params</span><span class="p">)</span>
    <span class="n">input_shape</span> <span class="o">=</span> <span class="n">shape_dict</span><span class="p">[</span><span class="s2">&quot;input_1&quot;</span><span class="p">]</span>
    <span class="n">dummy_data</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">uniform</span><span class="p">(</span><span class="n">size</span><span class="o">=</span><span class="n">input_shape</span><span class="p">,</span> <span class="n">low</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">high</span><span class="o">=</span><span class="n">input_shape</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s2">&quot;int32&quot;</span><span class="p">)</span>

    <span class="n">m</span> <span class="o">=</span> <span class="n">graph_executor</span><span class="o">.</span><span class="n">GraphModule</span><span class="p">(</span><span class="n">lib</span><span class="p">[</span><span class="s2">&quot;default&quot;</span><span class="p">](</span><span class="n">dev</span><span class="p">))</span>
    <span class="n">m</span><span class="o">.</span><span class="n">set_input</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">dummy_data</span><span class="p">)</span>
    <span class="n">m</span><span class="o">.</span><span class="n">run</span><span class="p">()</span>
    <span class="n">tvm_output</span> <span class="o">=</span> <span class="n">m</span><span class="o">.</span><span class="n">get_output</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>

    <span class="nb">print</span><span class="p">(</span><span class="n">m</span><span class="o">.</span><span class="n">benchmark</span><span class="p">(</span><span class="n">dev</span><span class="p">,</span> <span class="n">repeat</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span> <span class="n">number</span><span class="o">=</span><span class="mi">5</span><span class="p">))</span>
    <span class="k">return</span> <span class="n">tvm_output</span>


<span class="k">def</span> <span class="nf">run_dense</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">):</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Dense Model Benchmark:&quot;</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">run_relay_graph</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="run-the-sparse-graph">
<h2>运行稀疏图<a class="headerlink" href="#run-the-sparse-graph" title="永久链接至标题">¶</a></h2>
<p>Next we’ll convert the graph into a sparse representation and generate
fake sparse weights if needed. Then we’ll use the same benchmarking
script as dense to see how much faster we go! We apply a few relay passes
to the graph to get it leveraging sparsity. First we use
<cite>simplify_fc_transpose</cite> to fold transposes on the weights of dense layers
into the parameters. This makes it easier to convert the matrix multiplies
to sparse versions. Next we apply <cite>bsr_dense.convert</cite> to identify all
weight matrices that can be sparse, and automatically replace them.</p>
<p>The <cite>bsr_dense.convert</cite> call below is doing the heavy lifting of identifying
which weights in the model can be made sparse by checking if they are
at least <cite>sparsity_threshold</cite> percent sparse. If so, it converts those
weights into <em>Block Sparse Row format (BSR)</em>. BSR is essentially
a representation that indexes into the nonzero chunks of the tensor,
making it easy for an algorithm to load those non-zero chunks and ignore
the rest of the tensor. Once the sparse weights are in BSR format,
<cite>relay.transform.DenseToSparse</cite> is applied to actually replace
<cite>relay.dense</cite> operations with <cite>relay.sparse_dense</cite> calls that can be
run faster.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">random_bsr_matrix</span><span class="p">(</span><span class="n">M</span><span class="p">,</span> <span class="n">N</span><span class="p">,</span> <span class="n">BS_R</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">,</span> <span class="n">density</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="s2">&quot;float32&quot;</span><span class="p">):</span>
    <span class="n">Y</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">((</span><span class="n">M</span><span class="p">,</span> <span class="n">N</span><span class="p">),</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">)</span>
    <span class="k">assert</span> <span class="n">M</span> <span class="o">%</span> <span class="n">BS_R</span> <span class="o">==</span> <span class="mi">0</span>
    <span class="k">assert</span> <span class="n">N</span> <span class="o">%</span> <span class="n">BS_C</span> <span class="o">==</span> <span class="mi">0</span>
    <span class="n">nnz</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">density</span> <span class="o">*</span> <span class="n">M</span> <span class="o">*</span> <span class="n">N</span><span class="p">)</span>
    <span class="n">num_blocks</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">nnz</span> <span class="o">/</span> <span class="p">(</span><span class="n">BS_R</span> <span class="o">*</span> <span class="n">BS_C</span><span class="p">))</span> <span class="o">+</span> <span class="mi">1</span>
    <span class="n">candidate_blocks</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asarray</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">product</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">M</span><span class="p">,</span> <span class="n">BS_R</span><span class="p">),</span> <span class="nb">range</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">N</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">))))</span>
    <span class="k">assert</span> <span class="n">candidate_blocks</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">==</span> <span class="n">M</span> <span class="o">//</span> <span class="n">BS_R</span> <span class="o">*</span> <span class="n">N</span> <span class="o">//</span> <span class="n">BS_C</span>
    <span class="n">chosen_blocks</span> <span class="o">=</span> <span class="n">candidate_blocks</span><span class="p">[</span>
        <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">choice</span><span class="p">(</span><span class="n">candidate_blocks</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">size</span><span class="o">=</span><span class="n">num_blocks</span><span class="p">,</span> <span class="n">replace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
    <span class="p">]</span>
    <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">chosen_blocks</span><span class="p">)):</span>
        <span class="n">r</span><span class="p">,</span> <span class="n">c</span> <span class="o">=</span> <span class="n">chosen_blocks</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
        <span class="n">Y</span><span class="p">[</span><span class="n">r</span> <span class="p">:</span> <span class="n">r</span> <span class="o">+</span> <span class="n">BS_R</span><span class="p">,</span> <span class="n">c</span> <span class="p">:</span> <span class="n">c</span> <span class="o">+</span> <span class="n">BS_C</span><span class="p">]</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">uniform</span><span class="p">(</span><span class="o">-</span><span class="mf">0.1</span><span class="p">,</span> <span class="mf">0.1</span><span class="p">,</span> <span class="p">(</span><span class="n">BS_R</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">))</span>
    <span class="n">s</span> <span class="o">=</span> <span class="n">sp</span><span class="o">.</span><span class="n">bsr_matrix</span><span class="p">(</span><span class="n">Y</span><span class="p">,</span> <span class="n">blocksize</span><span class="o">=</span><span class="p">(</span><span class="n">BS_R</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">))</span>
    <span class="k">assert</span> <span class="n">s</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">shape</span> <span class="o">==</span> <span class="p">(</span><span class="n">num_blocks</span><span class="p">,</span> <span class="n">BS_R</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">)</span>
    <span class="k">assert</span> <span class="n">s</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">size</span> <span class="o">&gt;=</span> <span class="n">nnz</span>
    <span class="k">assert</span> <span class="n">s</span><span class="o">.</span><span class="n">indices</span><span class="o">.</span><span class="n">shape</span> <span class="o">==</span> <span class="p">(</span><span class="n">num_blocks</span><span class="p">,)</span>
    <span class="k">assert</span> <span class="n">s</span><span class="o">.</span><span class="n">indptr</span><span class="o">.</span><span class="n">shape</span> <span class="o">==</span> <span class="p">(</span><span class="n">M</span> <span class="o">//</span> <span class="n">BS_R</span> <span class="o">+</span> <span class="mi">1</span><span class="p">,)</span>
    <span class="k">return</span> <span class="n">s</span><span class="o">.</span><span class="n">todense</span><span class="p">()</span>


<span class="k">def</span> <span class="nf">random_sparse_bert_params</span><span class="p">(</span><span class="n">func</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">density</span><span class="p">,</span> <span class="n">BS_R</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">):</span>
    <span class="k">def</span> <span class="nf">deepcopy</span><span class="p">(</span><span class="n">param_dic</span><span class="p">):</span>
        <span class="n">ret</span> <span class="o">=</span> <span class="p">{}</span>
        <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">param_dic</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
            <span class="n">ret</span><span class="p">[</span><span class="n">k</span><span class="p">]</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">v</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
        <span class="k">return</span> <span class="n">ret</span>

    <span class="n">new_params</span> <span class="o">=</span> <span class="n">deepcopy</span><span class="p">(</span><span class="n">params</span><span class="p">)</span>
    <span class="n">dense_weight_names</span> <span class="o">=</span> <span class="n">relay</span><span class="o">.</span><span class="n">analysis</span><span class="o">.</span><span class="n">sparse_dense</span><span class="o">.</span><span class="n">_search_dense_op_weight</span><span class="p">(</span><span class="n">func</span><span class="p">)</span>
    <span class="k">for</span> <span class="n">item</span> <span class="ow">in</span> <span class="n">dense_weight_names</span><span class="p">:</span>
        <span class="n">name</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span><span class="n">item</span><span class="p">)</span>
        <span class="n">shape</span> <span class="o">=</span> <span class="n">new_params</span><span class="p">[</span><span class="n">name</span><span class="p">]</span><span class="o">.</span><span class="n">shape</span>
        <span class="k">if</span> <span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">%</span> <span class="n">BS_R</span> <span class="o">==</span> <span class="mi">0</span> <span class="ow">and</span> <span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">%</span> <span class="n">BS_C</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
            <span class="n">new_w</span> <span class="o">=</span> <span class="n">random_bsr_matrix</span><span class="p">(</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">BS_R</span><span class="p">,</span> <span class="n">BS_C</span><span class="p">,</span> <span class="n">density</span><span class="p">)</span>
            <span class="n">new_params</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="n">tvm</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">new_w</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">new_params</span>


<span class="k">def</span> <span class="nf">run_sparse</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">,</span> <span class="n">bs_r</span><span class="p">,</span> <span class="n">sparsity</span><span class="p">,</span> <span class="n">gen_weights</span><span class="p">):</span>
    <span class="n">mod</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">ddo</span><span class="o">.</span><span class="n">simplify_fc_transpose</span><span class="o">.</span><span class="n">convert</span><span class="p">(</span><span class="n">mod</span><span class="p">[</span><span class="s2">&quot;main&quot;</span><span class="p">],</span> <span class="n">params</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">gen_weights</span><span class="p">:</span>
        <span class="n">params</span> <span class="o">=</span> <span class="n">random_sparse_bert_params</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">BS_R</span><span class="o">=</span><span class="n">bs_r</span><span class="p">,</span> <span class="n">BS_C</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">density</span><span class="o">=</span><span class="mi">1</span> <span class="o">-</span> <span class="n">sparsity</span><span class="p">)</span>
    <span class="n">mod</span><span class="p">,</span> <span class="n">params</span> <span class="o">=</span> <span class="n">ddo</span><span class="o">.</span><span class="n">bsr_dense</span><span class="o">.</span><span class="n">convert</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="p">(</span><span class="n">bs_r</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="n">sparsity_threshold</span><span class="o">=</span><span class="mf">0.8</span><span class="p">)</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Block Sparse Model with </span><span class="si">{blocksize}</span><span class="s2">x1 blocks:&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">blocksize</span><span class="o">=</span><span class="n">bs_r</span><span class="p">))</span>
    <span class="k">return</span> <span class="n">run_relay_graph</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="run-all-the-code">
<h2>运行所有代码！<a class="headerlink" href="#run-all-the-code" title="永久链接至标题">¶</a></h2>
<p>就这样！现在我们只需调用所有需要的函数，根据设置的参数对模型进行基准测试。请注意，要运行此代码，首先需要取消最后一行的注释。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">benchmark</span><span class="p">():</span>
    <span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span> <span class="o">=</span> <span class="n">import_graphdef</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">,</span> <span class="n">seq_len</span><span class="p">)</span>
    <span class="n">run_dense</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">measure_sparse</span><span class="p">:</span>
        <span class="n">gen_weights</span> <span class="o">=</span> <span class="s2">&quot;prune&quot;</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">name</span>
        <span class="n">run_sparse</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">shape_dict</span><span class="p">,</span> <span class="n">target</span><span class="p">,</span> <span class="n">dev</span><span class="p">,</span> <span class="n">bs_r</span><span class="p">,</span> <span class="n">sparsity</span><span class="p">,</span> <span class="n">gen_weights</span><span class="p">)</span>


<span class="c1"># benchmark()</span>
</pre></div>
</div>
</div>
<div class="section" id="sample-output">
<h2>样本输出<a class="headerlink" href="#sample-output" title="永久链接至标题">¶</a></h2>
<p>For reference, below is the output of the script when run on an AMD CPU
and shows about a 2.5X speedup from using sparsity.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># Dense Model Benchmark:</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;dense_nopack.x86&#39;, (&#39;TENSOR&#39;, (1, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (2, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;dense_nopack.x86&#39;, (&#39;TENSOR&#39;, (1, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (768, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;dense_nopack.x86&#39;, (&#39;TENSOR&#39;, (128, 3072), &#39;float32&#39;), (&#39;TENSOR&#39;, (768, 3072), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;dense_nopack.x86&#39;, (&#39;TENSOR&#39;, (128, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (3072, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;dense_nopack.x86&#39;, (&#39;TENSOR&#39;, (128, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (768, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;batch_matmul.x86&#39;, (&#39;TENSOR&#39;, (12, 128, 128), &#39;float32&#39;), (&#39;TENSOR&#39;, (12, 64, 128), &#39;float32&#39;)). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=llvm, workload=(&#39;batch_matmul.x86&#39;, (&#39;TENSOR&#39;, (12, 128, 64), &#39;float32&#39;), (&#39;TENSOR&#39;, (12, 128, 64), &#39;float32&#39;)). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Runtime:             165.26 ms           (12.83 ms)</span>
<span class="c1"># Block Sparse Model with 1x1 blocks:</span>
<span class="c1"># Runtime:             67.75 ms            (8.83 ms)</span>

<span class="c1"># Here is the output of this script on a GPU (GTX 1070) with the target &quot;cuda -libs=cublas&quot;.</span>
<span class="c1">#</span>
<span class="c1"># Dense Model Benchmark:</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;dense_cublas.cuda&#39;, (&#39;TENSOR&#39;, (1, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (2, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;dense_cublas.cuda&#39;, (&#39;TENSOR&#39;, (1, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (768, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;dense_cublas.cuda&#39;, (&#39;TENSOR&#39;, (128, 3072), &#39;float32&#39;), (&#39;TENSOR&#39;, (768, 3072), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;dense_cublas.cuda&#39;, (&#39;TENSOR&#39;, (128, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (3072, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;dense_cublas.cuda&#39;, (&#39;TENSOR&#39;, (128, 768), &#39;float32&#39;), (&#39;TENSOR&#39;, (768, 768), &#39;float32&#39;), None, &#39;float32&#39;). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;batch_matmul_cublas.cuda&#39;, (&#39;TENSOR&#39;, (12, 128, 128), &#39;float32&#39;), (&#39;TENSOR&#39;, (12, 64, 128), &#39;float32&#39;), (12, 128, 64)). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=(&#39;batch_matmul_cublas.cuda&#39;, (&#39;TENSOR&#39;, (12, 128, 64), &#39;float32&#39;), (&#39;TENSOR&#39;, (12, 128, 64), &#39;float32&#39;), (12, 128, 128)). A fallback configuration is used, which may bring great performance regression.</span>
<span class="c1"># Runtime:             10.64 ms            (0.29 ms)</span>
<span class="c1"># Block Sparse Model with 1x1 blocks:</span>
<span class="c1"># Runtime:             6.46 ms             (0.05 ms)</span>
</pre></div>
</div>
<div class="sphx-glr-footer sphx-glr-footer-example docutils container" id="sphx-glr-download-how-to-deploy-models-deploy-sparse-py">
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../_downloads/9c3764c88ab3eb57dc223b4eda1e8a2f/deploy_sparse.py"><code class="xref download docutils literal notranslate"><span class="pre">下载Python源代码:</span> <span class="pre">deploy_sparse.py</span></code></a></p>
</div>
<div class="sphx-glr-download docutils container">
<p><a class="reference download internal" download="" href="../../_downloads/0b60295044fd20226a0d5adc52b50b2f/deploy_sparse.ipynb"><code class="xref download docutils literal notranslate"><span class="pre">下载</span> <span class="pre">Jupyter</span> <span class="pre">notebook:</span> <span class="pre">deploy_sparse.ipynb</span></code></a></p>
</div>
</div>
<p class="sphx-glr-signature"><a class="reference external" href="https://sphinx-gallery.github.io">Gallery generated by Sphinx-Gallery</a></p>
</div>
</div>


           </div>
           
          </div>
          

<footer>

    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="deploy_ssd_gluoncv.html" class="btn btn-neutral float-right" title="部署Single Shot Multibox Detector(SSD)模型" accesskey="n" rel="next">下一个 <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="deploy_quantized.html" class="btn btn-neutral float-left" title="在Cuda上部署一个量化模型" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> 上一个</a>
      
    </div>

<div id="button" class="backtop"><img src="../../_static/img/right.svg" alt="backtop"/> </div>
<section class="footerSec">
    <div class="footerHeader">
      <ul class="d-flex align-md-items-center justify-content-between flex-column flex-md-row">
        <li class="copywrite d-flex align-items-center">
          <h5 id="copy-right-info">© 2020 Apache Software Foundation | All rights reserved</h5>
        </li>
      </ul>

    </div>

    <ul>
      <li class="footernote">Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather, and the Apache TVM project logo are either trademarks or registered trademarks of the Apache Software Foundation.</li>
    </ul>

</section>
</footer>
        </div>
      </div>

    </section>

  </div>
  

    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>

  <!-- duplicate </body> removed: the document is properly closed after the trailing scripts -->
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-75982049-2', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>