<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" /><meta name="generator" content="Docutils 0.18.1: http://docutils.sourceforge.net/" />

  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  
<!-- OneTrust Cookies Consent Notice start for xilinx.github.io -->

<script src="https://cdn.cookielaw.org/scripttemplates/otSDKStub.js" data-document-language="true" type="text/javascript" charset="UTF-8" data-domain-script="03af8d57-0a04-47a6-8f10-322fa00d8fc7" ></script>
<script type="text/javascript">
// OneTrust consent-SDK callback, invoked by otSDKStub.js after the consent
// banner is loaded or the user's consent preferences change.
// Intentionally empty: this site takes no page-specific action on consent
// events (category-gated scripts are handled via optanon-category-* classes).
function OptanonWrapper() { }
</script>
<!-- OneTrust Cookies Consent Notice end for xilinx.github.io -->
<!-- Google Tag Manager -->
<script type="text/plain" class="optanon-category-C0002">(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
// Explicit https: (was protocol-relative '//'), per current GTM snippet guidance.
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-5RHQV7');</script>
<!-- End Google Tag Manager -->
  <title>Quick Start Guide for Alveo V70 &mdash; Vitis™ AI 3.5 documentation</title>
      <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />
      <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
      <link rel="stylesheet" href="../../_static/custom.css" type="text/css" />
  <!--[if lt IE 9]>
    <script src="../../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
        <script data-url_root="../../" id="documentation_options" src="../../_static/documentation_options.js"></script>
        <script src="../../_static/jquery.js"></script>
        <script src="../../_static/underscore.js"></script>
        <script src="../../_static/_sphinx_javascript_frameworks_compat.js"></script>
        <script src="../../_static/doctools.js"></script>
    <script src="../../_static/js/theme.js"></script>
    <link rel="index" title="Index" href="../../genindex.html" />
    <link rel="search" title="Search" href="../../search.html" />
    <link rel="next" title="Overview" href="../workflow.html" />
    <link rel="prev" title="Quick Start Guide for Versal™ AI Edge VEK280" href="vek280.html" /> 
</head>

<body class="wy-body-for-nav">

<!-- Google Tag Manager -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5RHQV7" title="Google Tag Manager" height="0" width="0" style="display:none;visibility:hidden" class="optanon-category-C0002"></iframe></noscript>
<!-- End Google Tag Manager --> 
  <div class="wy-grid-for-nav">
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
            <a href="../../index.html" class="icon icon-home"> Vitis™ AI
            <img src="../../_static/xilinx-header-logo.svg" class="logo" alt="Logo"/>
          </a>
              <div class="version">
                3.5
              </div>
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>
        </div><div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="Navigation menu">
              <p class="caption" role="heading"><span class="caption-text">Setup and Install</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../reference/release_notes.html">Release Notes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/system_requirements.html">System Requirements</a></li>
<li class="toctree-l1"><a class="reference internal" href="../install/install.html">Host Install Instructions</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Quick Start Guides</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="vek280.html">Versal™ AI Edge VEK280</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Alveo™ V70</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#prerequisites">Prerequisites</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#system-requirements">System Requirements</a></li>
<li class="toctree-l3"><a class="reference internal" href="#applicable-targets">Applicable Targets</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#quickstart">Quickstart</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#clone-the-vitis-ai-repository">Clone the Vitis AI Repository</a></li>
<li class="toctree-l3"><a class="reference internal" href="#alveo-v70-setup">Alveo V70 Setup</a></li>
<li class="toctree-l3"><a class="reference internal" href="#install-docker">Install Docker</a></li>
<li class="toctree-l3"><a class="reference internal" href="#verify-docker-installation">Verify Docker Installation</a></li>
<li class="toctree-l3"><a class="reference internal" href="#pull-vitis-ai-docker">Pull Vitis AI Docker</a></li>
<li class="toctree-l3"><a class="reference internal" href="#docker-container-environment-variable-setup">Docker Container Environment Variable Setup</a></li>
<li class="toctree-l3"><a class="reference internal" href="#vitis-ai-model-zoo">Vitis-AI Model Zoo</a></li>
<li class="toctree-l3"><a class="reference internal" href="#run-the-vitis-ai-examples">Run the Vitis AI Examples</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#pytorch-tutorial">PyTorch Tutorial</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#quantizing-the-model">Quantizing the Model</a></li>
<li class="toctree-l3"><a class="reference internal" href="#compile-the-model">Compile the Model</a></li>
<li class="toctree-l3"><a class="reference internal" href="#model-deployment">Model Deployment</a></li>
</ul>
</li>
</ul>
</li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Workflow and Components</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../workflow.html">Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="../workflow-system-integration.html">DPU IP Details and System Integration</a></li>
<li class="toctree-l1"><a class="reference internal" href="../workflow-model-zoo.html">Vitis™ AI Model Zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../workflow-model-development.html">Developing a Model for Vitis AI</a></li>
<li class="toctree-l1"><a class="reference internal" href="../workflow-model-deployment.html">Deploying a Model with Vitis AI</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Runtime API Documentation</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../doxygen/api/classlist.html">C++ API Class</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../doxygen/api/pythonlist.html">Python APIs</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Additional Information</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../reference/release_documentation.html">Vitis™ AI User Guides &amp; IP Product Guides</a></li>
<li class="toctree-l1"><a class="reference external" href="https://github.com/Xilinx/Vitis-AI-Tutorials">Vitis™ AI Developer Tutorials</a></li>
<li class="toctree-l1"><a class="reference internal" href="../workflow-third-party.html">Third-party Inference Stack Integration</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/version_compatibility.html">IP and Tools Compatibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../install/branching_tagging_strategy.html">Branching and Tagging Strategy</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Resources and Support</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../reference/additional_resources.html">Technical Support</a></li>
<li class="toctree-l1"><a class="reference internal" href="../reference/additional_resources.html#id1">Additional Resources</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Related AMD Solutions</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://github.com/Xilinx/DPU-PYNQ">DPU-PYNQ</a></li>
<li class="toctree-l1"><a class="reference external" href="https://xilinx.github.io/finn/">FINN &amp; Brevitas</a></li>
<li class="toctree-l1"><a class="reference external" href="https://xilinx.github.io/inference-server/">Inference Server</a></li>
<li class="toctree-l1"><a class="reference external" href="https://github.com/amd/UIF">Unified Inference Frontend</a></li>
<li class="toctree-l1"><a class="reference external" href="https://ryzenai.docs.amd.com/en/latest/">Ryzen™ AI Developer Guide</a></li>
<li class="toctree-l1"><a class="reference external" href="https://onnxruntime.ai/docs/execution-providers/community-maintained/Vitis-AI-ExecutionProvider.html">Vitis™ AI ONNX Runtime Execution Provider</a></li>
<li class="toctree-l1"><a class="reference external" href="https://xilinx.github.io/VVAS/">Vitis™ Video Analytics SDK</a></li>
</ul>

        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap"><nav class="wy-nav-top" aria-label="Mobile navigation menu"  style="background: black" >
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../../index.html">Vitis™ AI</a>
      </nav>

      <div class="wy-nav-content">
        <div class="rst-content">
          <div role="navigation" aria-label="Page navigation">
  <ul class="wy-breadcrumbs">
      <li><a href="../../index.html" class="icon icon-home"></a> &raquo;</li>
      <li>Quick Start Guide for Alveo V70</li>
      <li class="wy-breadcrumbs-aside">
            <a href="../../_sources/docs/quickstart/v70.rst.txt" rel="nofollow"> View page source</a>
      </li>
  </ul>
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
             
  <section id="quick-start-guide-for-alveo-v70">
<h1>Quick Start Guide for Alveo V70<a class="headerlink" href="#quick-start-guide-for-alveo-v70" title="Permalink to this heading">¶</a></h1>
<p>The AMD <strong>DPUCV2DX8G</strong> for the Alveo™ V70 is a configurable computation engine dedicated to convolutional neural networks. It supports a highly optimized instruction set, enabling the deployment of most convolutional neural networks. The following instructions will help you install the software and packages required to support V70.</p>
<a class="reference internal image-reference" href="../../_images/V70.PNG"><img alt="../../_images/V70.PNG" src="../../_images/V70.PNG" style="width: 1300px;" /></a>
<section id="prerequisites">
<h2>Prerequisites<a class="headerlink" href="#prerequisites" title="Permalink to this heading">¶</a></h2>
<section id="system-requirements">
<h3>System Requirements<a class="headerlink" href="#system-requirements" title="Permalink to this heading">¶</a></h3>
<ul class="simple">
<li><p>Confirm that your development machine meets the minimum <a class="reference internal" href="../reference/system_requirements.html"><span class="doc">Host System Requirements</span></a>.</p></li>
<li><p>Confirm that you have at least <strong>100GB</strong> of free space in the target partition.</p></li>
</ul>
</section>
<section id="applicable-targets">
<h3>Applicable Targets<a class="headerlink" href="#applicable-targets" title="Permalink to this heading">¶</a></h3>
<ul class="simple">
<li><p>This quickstart is applicable to the <a class="reference external" href="https://www.xilinx.com/applications/data-center/v70.html">V70</a></p></li>
</ul>
</section>
</section>
<section id="quickstart">
<h2>Quickstart<a class="headerlink" href="#quickstart" title="Permalink to this heading">¶</a></h2>
<section id="clone-the-vitis-ai-repository">
<h3>Clone the Vitis AI Repository<a class="headerlink" href="#clone-the-vitis-ai-repository" title="Permalink to this heading">¶</a></h3>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ git clone https://github.com/Xilinx/Vitis-AI
</pre></div>
</div>
</section>
<section id="alveo-v70-setup">
<h3>Alveo V70 Setup<a class="headerlink" href="#alveo-v70-setup" title="Permalink to this heading">¶</a></h3>
<p>A script is provided to drive the V70 card setup process.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>You should run this script on the host machine, OUTSIDE of the Docker container. After the script has executed successfully, manually reboot the host server once. For data center DPUs, Vitis™ AI 3.5 specifically leverages the 2022.2 versions of the Vitis tools, V70 platform, XRT and XRM.  No, that is not a typo - 2022 is correct.</p>
</div>
<p>This script will detect the operating system of the host, and will download and install the appropriate packages for that operating system.</p>
<p>Execute this script as follows:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>[Host] $ cd &lt;Vitis-AI install path&gt;/Vitis-AI/board_setup/v70
[Host] $ source ./install.sh
</pre></div>
</div>
<p>The following installation steps are performed by this script:</p>
<ol class="arabic simple">
<li><p>XRT Installation. The <a class="reference external" href="https://github.com/Xilinx/XRT">Xilinx RunTime (XRT)</a> is a combination of userspace and kernel driver components supporting PCIe accelerator cards such as the V70.</p></li>
<li><p>XRM Installation. The <a class="reference external" href="https://github.com/Xilinx/XRM/">Xilinx Resource Manager (XRM)</a> manages and controls FPGA resources on the host. It is required by the runtime.</p></li>
<li><p>Installation of the V70 platform.</p></li>
<li><p>Installation of the DPU xclbin for the V70 platform.</p></li>
</ol>
<p>After the script is executed successfully, use the XRT <cite>xbutil</cite> command to check that the installation was successful. The result should contain the correct information for System Configuration, XRT and Devices present.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>[Host] $ /opt/xilinx/xrt/bin/xbutil examine
</pre></div>
</div>
</section>
<section id="install-docker">
<h3>Install Docker<a class="headerlink" href="#install-docker" title="Permalink to this heading">¶</a></h3>
<ul class="simple">
<li><p>Make sure that the Docker engine is installed according to the official Docker <a class="reference external" href="https://docs.docker.com/engine/install/">documentation</a>.</p></li>
<li><p>The Docker daemon always runs as the root user. Non-root users must be <a class="reference external" href="https://docs.docker.com/engine/install/linux-postinstall/">added</a> to the docker group. Do this now.</p></li>
</ul>
</section>
<section id="verify-docker-installation">
<h3>Verify Docker Installation<a class="headerlink" href="#verify-docker-installation" title="Permalink to this heading">¶</a></h3>
<ul class="simple">
<li><p>Perform a quick and simple test of your Docker installation by executing the following command.  This command will download a test image from Docker Hub and run it in a container. When the container runs successfully, it prints a “Hello World” message and exits.</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ docker run hello-world
</pre></div>
</div>
<ul class="simple">
<li><p>Finally, verify that the version of Docker that you have installed meets the minimum <a class="reference internal" href="../reference/system_requirements.html"><span class="doc">Host System Requirements</span></a> by running the following command</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ docker --version
</pre></div>
</div>
</section>
<section id="pull-vitis-ai-docker">
<h3>Pull Vitis AI Docker<a class="headerlink" href="#pull-vitis-ai-docker" title="Permalink to this heading">¶</a></h3>
<p>In order to simplify this quickstart tutorial, we will utilize the Vitis-AI PyTorch CPU Docker to assess pre-built Vitis-AI examples, and subsequently perform quantization and compilation of our own model. The CPU docker image is generic, does not require the user to build the container, and has no specific GPU enablement requirements.  More advanced users can optionally skip this step and jump to the <a class="reference internal" href="../install/install.html"><span class="doc">Full Install Instructions</span></a> but we would recommend that new users start with this simpler first step.
Pull and start the latest Vitis AI Docker using the following commands:</p>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ docker pull xilinx/vitis-ai-pytorch-cpu:latest
<span class="o">[</span>Host<span class="o">]</span> $ <span class="nb">cd</span> &lt;Vitis-AI install path&gt;/Vitis-AI/
<span class="o">[</span>Host<span class="o">]</span> $ ./docker_run.sh xilinx/vitis-ai-pytorch-cpu:latest
</pre></div>
</div>
</section>
<section id="docker-container-environment-variable-setup">
<h3>Docker Container Environment Variable Setup<a class="headerlink" href="#docker-container-environment-variable-setup" title="Permalink to this heading">¶</a></h3>
<p>From inside the docker container, execute one of the following commands to set the required environment variables for the DPU.  Note that the chosen xclbin file must be in the <code class="docutils literal notranslate"><span class="pre">/opt/xilinx/overlaybins</span></code> directory prior to execution. Select the xclbin that matches your chosen DPU configuration.</p>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">source</span> /workspace/board_setup/v70/setup.sh DPUCV2DX8G_v70
</pre></div>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>You will need to execute this script each time you re-enter the Docker container.</p>
</div>
</section>
<section id="vitis-ai-model-zoo">
<h3>Vitis-AI Model Zoo<a class="headerlink" href="#vitis-ai-model-zoo" title="Permalink to this heading">¶</a></h3>
<p>You can now select a model from the <a class="reference external" href="../workflow-model-zoo.html">Vitis AI Model Zoo</a>.  Navigate to the  <a class="reference external" href="https://github.com/Xilinx/Vitis-AI/tree/master/model_zoo/model-list">model-list subdirectory</a>  and select the model that you wish to test. For each model, a YAML file provides key details of the model. In the YAML file there are separate hyperlinks to download the model for each supported target.  Choose the correct link for your target platform and download the model.</p>
<ul class="simple">
<li><p>Take the ResNet50 model as an example.</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ wget https://www.xilinx.com/bin/public/openDownload?filename<span class="o">=</span>resnet50-v70-DPUCV2DX8G-r3.5.0.tar.gz -O resnet50-v70-DPUCV2DX8G-r3.5.0.tar.gz
<span class="o">[</span>Docker<span class="o">]</span> $ tar -xzvf resnet50-v70-DPUCV2DX8G-r3.5.0.tar.gz -C /usr/share/vitis_ai_library/models/
</pre></div>
</div>
<ul class="simple">
<li><p>If the <code class="docutils literal notranslate"><span class="pre">/usr/share/vitis_ai_library/models</span></code> folder does not exist, create it first.</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ sudo mkdir /usr/share/vitis_ai_library/models
</pre></div>
</div>
</section>
<section id="run-the-vitis-ai-examples">
<h3>Run the Vitis AI Examples<a class="headerlink" href="#run-the-vitis-ai-examples" title="Permalink to this heading">¶</a></h3>
<ol class="arabic simple">
<li><p>Download <a class="reference external" href="https://www.xilinx.com/bin/public/openDownload?filename=vitis_ai_runtime_r3.5.0_image_video.tar.gz">vitis_ai_runtime_r3.5.0_image_video.tar.gz</a> to your host.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> ~
<span class="o">[</span>Docker<span class="o">]</span> $ wget https://www.xilinx.com/bin/public/openDownload?filename<span class="o">=</span>vitis_ai_runtime_r3.5.0_image_video.tar.gz -O vitis_ai_runtime_r3.5.0_image_video.tar.gz
</pre></div>
</div>
<ol class="arabic simple" start="2">
<li><p>Extract the <code class="docutils literal notranslate"><span class="pre">vitis_ai_runtime_r3.5.0_image_video.tar.gz</span></code> package.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ tar -xzvf vitis_ai_runtime_r3.5.0_image_video.tar.gz -C /workspace/examples/vai_runtime
</pre></div>
</div>
<ol class="arabic simple" start="3">
<li><p>Navigate to the example directory.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> /workspace/examples/vai_runtime/resnet50
</pre></div>
</div>
<ol class="arabic simple" start="4">
<li><p>Compile the example.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ sudo chmod u+r+x build.sh
<span class="o">[</span>Docker<span class="o">]</span> $ bash -x build.sh
</pre></div>
</div>
<ol class="arabic simple" start="5">
<li><p>Run the example.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ ./resnet50 /usr/share/vitis_ai_library/models/resnet50/resnet50.xmodel
</pre></div>
</div>
<ol class="arabic simple" start="6">
<li><p>The console should reflect the following output:</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span>Image : <span class="m">001</span>.jpg
top<span class="o">[</span><span class="m">0</span><span class="o">]</span> <span class="nv">prob</span> <span class="o">=</span> <span class="m">0</span>.982662  <span class="nv">name</span> <span class="o">=</span> brain coral
top<span class="o">[</span><span class="m">1</span><span class="o">]</span> <span class="nv">prob</span> <span class="o">=</span> <span class="m">0</span>.008502  <span class="nv">name</span> <span class="o">=</span> coral reef
top<span class="o">[</span><span class="m">2</span><span class="o">]</span> <span class="nv">prob</span> <span class="o">=</span> <span class="m">0</span>.006621  <span class="nv">name</span> <span class="o">=</span> jackfruit, jak, jack
top<span class="o">[</span><span class="m">3</span><span class="o">]</span> <span class="nv">prob</span> <span class="o">=</span> <span class="m">0</span>.000543  <span class="nv">name</span> <span class="o">=</span> puffer, pufferfish, blowfish, globefish
top<span class="o">[</span><span class="m">4</span><span class="o">]</span> <span class="nv">prob</span> <span class="o">=</span> <span class="m">0</span>.000330  <span class="nv">name</span> <span class="o">=</span> eel
</pre></div>
</div>
<p>These results reflect the classification of a single test image located in the <code class="docutils literal notranslate"><span class="pre">Vitis-AI/examples/vai_library/images</span></code> directory.</p>
</section>
</section>
<section id="pytorch-tutorial">
<h2>PyTorch Tutorial<a class="headerlink" href="#pytorch-tutorial" title="Permalink to this heading">¶</a></h2>
<p>This tutorial assumes that Vitis AI has been installed and that the board has been configured as explained in the installation instructions above. For additional information on the Vitis AI Quantizer, Optimizer, or Compiler, please refer to the Vitis AI User Guide.</p>
<section id="quantizing-the-model">
<h3>Quantizing the Model<a class="headerlink" href="#quantizing-the-model" title="Permalink to this heading">¶</a></h3>
<p>Quantization reduces the precision of network weights and activations to optimize memory usage and computational efficiency while maintaining acceptable levels of accuracy. Inference is computationally expensive and requires high memory bandwidths to satisfy the
low-latency and high-throughput requirements of Edge applications. Quantization and channel pruning techniques are employed to address these issues while achieving high performance and high energy efficiency with little degradation in accuracy. The Vitis AI Quantizer takes a
floating-point model as an input and performs pre-processing (folds batchnorms and removes nodes not required for inference), and finally quantizes the weights/biases and activations to the given bit width.</p>
<ol class="arabic simple">
<li><p>Navigate to the cloned Vitis-AI directory and create a new workspace for your project.  Here you will store the test dataset, models, and python scripts required for quantization.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ <span class="nb">cd</span> ~/Vitis-AI
<span class="o">[</span>Host<span class="o">]</span> $ mkdir -p resnet18/model
</pre></div>
</div>
<ol class="arabic simple" start="2">
<li><p>Download the <a class="reference external" href="https://www.kaggle.com/datasets/ifigotin/imagenetmini-1000/download?datasetVersionNumber=1">ImageNet 1000 (mini)</a> dataset from Kaggle. This dataset is a subset of the ILSVRC 2012-2017 dataset and comprises 1000 object classes, and contains 1,281,167 training, 50,000 validation, and 100,000 test images.  You will need to create a Kaggle account to access this dataset.  Move the downloaded Archive.zip file into the created /Vitis-AI/resnet18 folder and unzip the dataset.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ <span class="nb">cd</span> resnet18
<span class="o">[</span>Host<span class="o">]</span> $ unzip Archive.zip
</pre></div>
</div>
<ul class="simple">
<li><p>Your workspace directory should reflect the following:</p></li>
</ul>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>├── Archive.zip
│
├── model
│
└── imagenet-mini
        ├── train                    # Training images folder. Will not be used in this tutorial.
        │   └─── n01440764           # Class folders to group images.
        └── val                      # Validation images that will be used for quantization and evaluation of the floating point model.
            └─── n01440764
</pre></div>
</div>
<ol class="arabic simple" start="3">
<li><p>Navigate to the Vitis-AI directory and execute the following command to start Docker.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Host<span class="o">]</span> $ <span class="nb">cd</span> ..
<span class="o">[</span>Host<span class="o">]</span> $ ./docker_run.sh xilinx/vitis-ai-pytorch-cpu:latest
</pre></div>
</div>
<ul class="simple">
<li><p>Note that when you start Docker as shown above, your <code class="docutils literal notranslate"><span class="pre">/workspace</span></code> folder will correspond to <code class="docutils literal notranslate"><span class="pre">/Vitis-AI</span></code> and your initial path in Docker will be <code class="docutils literal notranslate"><span class="pre">/workspace</span></code>.  If you inspect <code class="docutils literal notranslate"><span class="pre">docker_run.sh</span></code> you can see that the -v option is leveraged which links the Docker file system to your Host file system.  Verify that you see the created <code class="docutils literal notranslate"><span class="pre">/resnet18</span></code> subfolder in your workspace:</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ ls
</pre></div>
</div>
<ol class="arabic simple" start="4">
<li><p>Next, download the pre-trained resnet18 model from PyTorch to the docker environment and store it in the <code class="docutils literal notranslate"><span class="pre">model</span></code> folder. This is the floating point (FP32) model that will be quantized to INT8 precision for deployment on the target. Also, since you have re-entered the Docker container, you need to re-run the setup script.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">source</span> /workspace/board_setup/v70/setup.sh DPUCV2DX8G_v70
<span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> resnet18/model
<span class="o">[</span>Docker<span class="o">]</span> $ wget https://download.pytorch.org/models/resnet18-5c106cde.pth -O resnet18.pth
</pre></div>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The <a class="reference external" href="../workflow-model-zoo.html">Vitis AI Model Zoo</a> also provides optimized deep learning models to speed up the deployment of deep learning inference on adaptable AMD platforms. For this tutorial we have chosen to use an open-source PyTorch model to showcase that models from the community can also be deployed.</p>
</div>
<ol class="arabic simple" start="5">
<li><p>Copy the example Vitis AI ResNet18 quantization script to your workspace. This script contains the Quantizer API calls that will be executed in order to quantize the model.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> ..
<span class="o">[</span>Docker<span class="o">]</span> $ cp ../src/vai_quantizer/vai_q_pytorch/example/resnet18_quant.py ./
</pre></div>
</div>
<ul class="simple">
<li><p>Your <code class="docutils literal notranslate"><span class="pre">workspace/resnet18</span></code> directory should reflect the following:</p></li>
</ul>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>├── Archive.zip
│
├── model
│   └── resnet18.pth             # ResNet18 floating point model downloaded from PyTorch.
│
├── imagenet-mini
│   ├── train                    # Training images folder. Will not be used in this tutorial.
│   │   └─── n01440764           # Class folders to group images.
│   └── val                      # Validation images that will be used for quantization and evaluation of the floating point model.
│       └─── n01440764
│
└── resnet18_quant.py            # Quantization python script.
</pre></div>
</div>
<ul class="simple">
<li><p>Inspect <code class="docutils literal notranslate"><span class="pre">resnet18_quant.py</span></code>.  Observe the parser arguments that can be passed to the script via command line switches <code class="docutils literal notranslate"><span class="pre">subset_len</span></code> <code class="docutils literal notranslate"><span class="pre">quant_mode</span></code> <code class="docutils literal notranslate"><span class="pre">data_dir</span></code> and <code class="docutils literal notranslate"><span class="pre">model_dir</span></code>.  We will set the <code class="docutils literal notranslate"><span class="pre">data_dir</span></code> and <code class="docutils literal notranslate"><span class="pre">model_dir</span></code> arguments to align with our directory structure.  If you wish to avoid extraneous typing and are manually entering these commands, you can simply edit the script to suit your use case.</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ vim resnet18_quant.py
</pre></div>
</div>
<ul class="simple">
<li><p>Use the sequence <code class="docutils literal notranslate"><span class="pre">&lt;esc&gt;</span> <span class="pre">:q!</span></code> to exit vim without saving.</p></li>
</ul>
<ol class="arabic simple" start="6">
<li><p>Run the command below to evaluate the accuracy of the floating point model.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ python resnet18_quant.py --quant_mode float --data_dir imagenet-mini --model_dir model
</pre></div>
</div>
<ul class="simple">
<li><p>You should observe that the accuracy reported is similar to  <code class="docutils literal notranslate"><span class="pre">top-1</span> <span class="pre">/</span> <span class="pre">top-5</span> <span class="pre">accuracy:</span> <span class="pre">69.9975</span> <span class="pre">/</span> <span class="pre">88.7586</span></code></p></li>
</ul>
<ol class="arabic simple" start="7">
<li><p>Next, let’s activate the pytorch conda env and run the Model Inspector to confirm that this model should be compatible with the target DPU architecture.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ conda activate vitis-ai-pytorch
<span class="o">[</span>Docker<span class="o">]</span> $ python resnet18_quant.py --quant_mode float --inspect --target DPUCV2DX8G_ISA1_C20B14 --model_dir model
</pre></div>
</div>
<ol class="arabic simple" start="8">
<li><p>Run the command below to start quantization. Generally, 100-1000 images are required for quantization and the number of iterations can be controlled through the <code class="docutils literal notranslate"><span class="pre">subset_len</span></code> data loading argument. In this case, 200 images are forward propagated through the network, and these images are chosen randomly from the validation image set.  Note that the displayed loss and accuracy that are output from this process are not representative of final model accuracy.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ python resnet18_quant.py --quant_mode calib --data_dir imagenet-mini --model_dir model --subset_len <span class="m">200</span>
</pre></div>
</div>
<ul class="simple">
<li><p>On most host machines this command should complete in less than 1 minute even with the CPU-only Docker.  If you leverage the CUDA or ROCm Dockers on a compatible machine, the Quantization process will be accelerated considerably.  Let’s take a look at the output:</p></li>
</ul>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> quantize_result
<span class="o">[</span>Docker<span class="o">]</span> $ ls
</pre></div>
</div>
<ul>
<li><p>If the command ran successfully, the output directory <code class="docutils literal notranslate"><span class="pre">quantize_result</span></code> will be generated, containing two important files:</p>
<blockquote>
<div><dl class="simple">
<dt><code class="docutils literal notranslate"><span class="pre">ResNet.py</span></code></dt><dd><p>The quantized vai_q_pytorch format model.</p>
</dd>
<dt><code class="docutils literal notranslate"><span class="pre">Quant_info.json</span></code></dt><dd><p>Quantization steps of tensors. Retain this file for evaluation of the quantized model.</p>
</dd>
</dl>
</div></blockquote>
</li>
</ul>
<ol class="arabic simple" start="9">
<li><p>To evaluate the accuracy of the quantized model, return to the <code class="docutils literal notranslate"><span class="pre">/resnet18</span></code> directory and run the following commands.  Note that on CPU-only host machines this command will take some time to complete (~20 minutes).  If you are in a hurry, you can skip this step and move to the next.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> ..
<span class="o">[</span>Docker<span class="o">]</span> $ python resnet18_quant.py --model_dir model --data_dir imagenet-mini --quant_mode <span class="nb">test</span>
</pre></div>
</div>
<p>You should observe that the accuracy reported will be similar to <code class="docutils literal notranslate"><span class="pre">top-1</span> <span class="pre">/</span> <span class="pre">top-5</span> <span class="pre">accuracy:</span> <span class="pre">69.1308</span> <span class="pre">/</span> <span class="pre">88.7076</span></code>.  The net accuracy loss due to quantization is less than 1%.</p>
<ol class="arabic simple" start="10">
<li><p>To generate the quantized <code class="docutils literal notranslate"><span class="pre">.xmodel</span></code> file that will subsequently be compiled for the DPU, run the following command with <code class="docutils literal notranslate"><span class="pre">batch_size</span></code> and <code class="docutils literal notranslate"><span class="pre">subset_len</span></code> arguments set to 1 to avoid redundant iterations.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ python resnet18_quant.py --quant_mode <span class="nb">test</span> --subset_len <span class="m">1</span> --batch_size<span class="o">=</span><span class="m">1</span> --model_dir model --data_dir imagenet-mini --deploy
</pre></div>
</div>
</section>
<section id="compile-the-model">
<h3>Compile the Model<a class="headerlink" href="#compile-the-model" title="Permalink to this heading">¶</a></h3>
<p>The Vitis AI Compiler compiles the graph operators as a set of micro-coded instructions that are executed by the DPU.  In this step, we will compile the ResNet18 model that we quantized in the previous step.</p>
<ol class="arabic simple">
<li><p>The compiler takes the quantized <code class="docutils literal notranslate"><span class="pre">INT8.xmodel</span></code> and generates the deployable <code class="docutils literal notranslate"><span class="pre">DPU.xmodel</span></code> by running the command below.  Note that you must modify the command to specify the appropriate <code class="docutils literal notranslate"><span class="pre">arch.json</span></code> file for your target.  For V70 targets, these are located in the folder <code class="docutils literal notranslate"><span class="pre">/opt/vitis_ai/compiler/arch/DPUCV2DX8G</span></code> inside the Docker container.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> /workspace/resnet18
<span class="o">[</span>Docker<span class="o">]</span> $ vai_c_xir -x quantize_result/ResNet_int.xmodel -a /opt/vitis_ai/compiler/arch/DPUCV2DX8G/V70/arch.json -o resnet18_pt -n resnet18_pt
</pre></div>
</div>
<ul class="simple">
<li><p>If compilation is successful, the <code class="docutils literal notranslate"><span class="pre">resnet18_pt.xmodel</span></code> file should be generated according to the specified DPU architecture.</p></li>
</ul>
<ol class="arabic simple" start="2">
<li><p>Create a new file with your text editor of choice and name the file <code class="docutils literal notranslate"><span class="pre">resnet18_pt.prototxt</span></code>. Copy and paste the following lines of code:</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span>model <span class="o">{</span>
   name : <span class="s2">&quot;resnet18_pt&quot;</span>
   kernel <span class="o">{</span>
         name: <span class="s2">&quot;resnet18_pt_0&quot;</span>
         mean: <span class="m">103</span>.53
         mean: <span class="m">116</span>.28
         mean: <span class="m">123</span>.675
         scale: <span class="m">0</span>.017429
         scale: <span class="m">0</span>.017507
         scale: <span class="m">0</span>.01712475
   <span class="o">}</span>
   model_type : CLASSIFICATION
   classification_param <span class="o">{</span>
          top_k : <span class="m">5</span>
          test_accuracy : <span class="nb">false</span>
          preprocess_type : VGG_PREPROCESS
   <span class="o">}</span>
<span class="o">}</span>
</pre></div>
</div>
<ul class="simple">
<li><p>The <code class="docutils literal notranslate"><span class="pre">.prototxt</span></code> file is a Vitis™ AI configuration file that facilitates the uniform configuration management of model parameters. Please refer to the Vitis AI User Guide to learn more.</p></li>
<li><p>We can now deploy the quantized and compiled model on the V70 accelerator card.</p></li>
</ul>
</section>
<section id="model-deployment">
<h3>Model Deployment<a class="headerlink" href="#model-deployment" title="Permalink to this heading">¶</a></h3>
<ol class="arabic simple">
<li><p>Copy the <code class="docutils literal notranslate"><span class="pre">resnet18_pt</span></code> folder into the <code class="docutils literal notranslate"><span class="pre">/usr/share/vitis_ai_library/models/</span></code> directory.  This will locate your compiled model in the default Vitis AI Library example model directory, alongside the other Vitis AI example models.  Our purpose in doing this is to simplify the commands that follow, in which we will execute the Vitis AI Library samples with our model.</p></li>
</ol>
<p>2. The <a class="reference external" href="https://www.xilinx.com/bin/public/openDownload?filename=vitis_ai_library_r3.5.0_images.tar.gz">vitis_ai_library_r3.5.0_images.tar.gz</a> and <a class="reference external" href="https://www.xilinx.com/bin/public/openDownload?filename=vitis_ai_library_r3.5.0_video.tar.gz">vitis_ai_library_r3.5.0_video.tar.gz</a> packages
contain test images and videos that can be leveraged to evaluate our quantized model and other pre-built Vitis-AI Library examples.</p>
<blockquote>
<div><ol class="loweralpha simple">
<li><p>Download the packages.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> /workspace
<span class="o">[</span>Docker<span class="o">]</span> $ wget https://www.xilinx.com/bin/public/openDownload?filename<span class="o">=</span>vitis_ai_library_r3.5.0_images.tar.gz -O vitis_ai_library_r3.5.0_images.tar.gz
<span class="o">[</span>Docker<span class="o">]</span> $ wget https://www.xilinx.com/bin/public/openDownload?filename<span class="o">=</span>vitis_ai_library_r3.5.0_video.tar.gz -O vitis_ai_library_r3.5.0_video.tar.gz
</pre></div>
</div>
<ol class="loweralpha simple" start="2">
<li><p>Untar the files.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ tar -xzvf vitis_ai_library_r3.5.0_images.tar.gz -C /workspace/examples/vai_library/
<span class="o">[</span>Docker<span class="o">]</span> $ tar -xzvf vitis_ai_library_r3.5.0_video.tar.gz -C /workspace/examples/vai_library/
</pre></div>
</div>
</div></blockquote>
<ol class="arabic simple" start="3">
<li><p>Enter the directory of the sample and then compile it.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ <span class="nb">cd</span> /workspace/examples/vai_library/samples/classification
<span class="o">[</span>Docker<span class="o">]</span> $ ./build.sh
</pre></div>
</div>
<ol class="arabic simple" start="4">
<li><p>Execute the single-image test application.</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ ./test_jpeg_classification resnet18_pt /workspace/examples/vai_library/samples/classification/images/001.JPEG
</pre></div>
</div>
<p>If you wish to do so, you can review the <cite>result.jpg</cite> file.  OpenCV function calls have been used to overlay the predictions.</p>
<ol class="arabic simple" start="5">
<li><p>To run the video example, run the following command:</p></li>
</ol>
<div class="highlight-Bash notranslate"><div class="highlight"><pre><span></span><span class="o">[</span>Docker<span class="o">]</span> $ ./test_video_classification resnet18_pt /workspace/examples/vai_library/apps/seg_and_pose_detect/pose_960_540.avi -t <span class="m">8</span>
</pre></div>
</div>
<ol class="arabic simple" start="6">
<li><p>The output should be as follows:</p></li>
</ol>
<a class="reference internal image-reference" href="../../_images/Wallace.png"><img alt="../../_images/Wallace.png" src="../../_images/Wallace.png" style="width: 1300px;" /></a>
<ul class="simple">
<li><p>Congratulations! You have successfully quantized, compiled, and deployed a pre-trained model onto the V70 accelerator card.</p></li>
</ul>
</section>
</section>
</section>


           </div>
          </div>
          
				  
				  <footer><div class="rst-footer-buttons" role="navigation" aria-label="Footer">
        <a href="vek280.html" class="btn btn-neutral float-left" title="Quick Start Guide for Versal™ AI Edge VEK280" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
        <a href="../workflow.html" class="btn btn-neutral float-right" title="Overview" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
    </div>

  <hr/>

  <div role="contentinfo">
    <p>&#169; Copyright 2022-2023, Advanced Micro Devices, Inc.
      <span class="lastupdated">Last updated on July 19, 2023.
      </span></p>
  </div>



										<div class="aem-Grid aem-Grid--16">
											<div class="aem-GridColumn aem-GridColumn--xxxlarge--none aem-GridColumn--xsmall--16 aem-GridColumn--offset--xsmall--0 aem-GridColumn--xlarge--none aem-GridColumn--xxlarge--none aem-GridColumn--default--none aem-GridColumn--offset--large--1 aem-GridColumn--xlarge--12 aem-GridColumn--offset--default--0 aem-GridColumn--xxlarge--10 aem-GridColumn--offset--xlarge--2 aem-GridColumn--offset--xxlarge--3 aem-GridColumn--offset--xxxlarge--4 aem-GridColumn--xsmall--none aem-GridColumn--large--none aem-GridColumn aem-GridColumn--large--14 aem-GridColumn--xxxlarge--8 aem-GridColumn--default--16">
												<div class="container-fluid sub-footer">

													                    <div class="row">
                        <div class="col-xs-24">
                          <p><a target="_blank" href="https://www.amd.com/en/corporate/copyright">Terms and Conditions</a> | <a target="_blank" href="https://www.amd.com/en/corporate/privacy">Privacy</a> | <a target="_blank" href="https://www.amd.com/en/corporate/cookies">Cookie Policy</a> | <a target="_blank" href="https://www.amd.com/en/corporate/trademarks">Trademarks</a> | <a target="_blank" href="https://www.amd.com/system/files/documents/statement-human-trafficking-forced-labor.pdf">Statement on Forced Labor</a> | <a target="_blank" href="https://www.amd.com/en/corporate/competition">Fair and Open Competition</a> | <a target="_blank" href="https://www.amd.com/system/files/documents/amd-uk-tax-strategy.pdf">UK Tax Strategy</a> | <a target="_blank" href="https://docs.xilinx.com/v/u/9x6YvZKuWyhJId7y7RQQKA">Inclusive Terminology</a> | <a href="#cookiessettings" class="ot-sdk-show-settings">Cookies Settings</a></p>
                        </div>
                    </div>
												</div>
											</div>
										</div>
										
<br>


  Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    provided by <a href="https://readthedocs.org">Read the Docs</a>.
   

</footer>
        </div>
      </div>
    </section>
  </div>
  <script>
      // Wire up the Read the Docs theme's collapsible sidebar navigation
      // once the DOM is ready.
      jQuery(document).ready(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 <script type="text/javascript">
    // Collapsible ".toggle" sections: hide all content, keep headers
    // visible, and let a click on a header animate its siblings open or
    // closed while flipping the header's "open" class.
    jQuery(function () {
        var $headers = $(".toggle .header");
        $(".toggle > *").hide();
        $headers.show();
        $headers.on("click", function () {
            var $section = $(this).parent();
            $section.children().not(".header").toggle(400);
            $section.children(".header").toggleClass("open");
        });
    });
</script>


</body>
</html>