


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>torch.autograd.profiler &mdash; PyTorch master documentation</title>

  <link rel="canonical" href="https://pytorch.org/docs/stable/_modules/torch/autograd/profiler.html"/>

  <link rel="stylesheet" href="../../../_static/css/theme.css" type="text/css" />
  <!-- <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" /> -->
  <link rel="stylesheet" href="../../../_static/css/jit.css" type="text/css" />
  <!-- Load exactly one KaTeX stylesheet. The page previously pulled in both
       katex@0.10.0-beta and katex@0.11.1, duplicating the download and letting
       two versions' rules compete; only 0.11.1 is kept. -->
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css" type="text/css" />
  <link rel="stylesheet" href="../../../_static/katex-math.css" type="text/css" />
  <link rel="index" title="Index" href="../../../genindex.html" />
  <link rel="search" title="Search" href="../../../search.html" />

  <script src="../../../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts (crossorigin is required on as="font" preloads,
       otherwise the preload's request mode never matches and it is wasted) -->
  <link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

  <!-- Preload the KaTeX fonts at the SAME version as the stylesheet above.
       They previously pointed at katex@0.10.0 while the CSS requested 0.11.1
       URLs, so the preloads never matched and every font downloaded twice. -->
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<!-- NOTE(review): this header is emitted between </head> and <body>. Browsers
     reparent it into <body>, but the template should place it after the <body>
     tag — confirm against the theme's layout template before moving it. -->
<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <div class="ecosystem-dropdown">
              <!-- NOTE(review): href-less <a> acting as a dropdown toggle; the
                   theme JS binds via data-toggle, so semantics are left as-is. -->
              <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                Ecosystem
              </a>
              <div class="ecosystem-dropdown-menu">
                <!-- fixed: doubled closing quote in href broke the attribute -->
                <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                  <span class="dropdown-title">Models (Beta)</span>
                  <p>Discover, publish, and reuse pre-trained models</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools &amp; Libraries</span>
                  <p>Explore the ecosystem of tools and libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <div class="resources-dropdown">
              <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                Resources
              </a>
              <div class="resources-dropdown-menu">
                <!-- fixed: doubled closing quote in href broke the attribute -->
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/features">
                  <span class="dropdown-title">About</span>
                  <p>Learn about PyTorch’s features and capabilities</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>

      <!-- icon-only control; accessible name so AT users can identify it -->
      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu" aria-label="Open menu"></a>
    </div>

  </div>
</div>


<body class="pytorch-body">

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <!-- Empty icon-styled anchor: aria-label gives it an accessible name.
           The theme JS binds the behavior via data-behavior, so the element
           kind is left unchanged. -->
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents" aria-label="Toggle table of contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">

            <div class="version">
              master (1.5.0)
            </div>

            <div role="search">
              <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
                <!-- placeholder alone is not an accessible label; aria-label names the field -->
                <input type="text" name="q" placeholder="Search Docs" aria-label="Search Docs" />
                <input type="hidden" name="check_keywords" value="yes" />
                <input type="hidden" name="area" value="default" />
              </form>
            </div>

          </div>

          <div>
            <!-- NOTE(review): inline style kept — the theme ships no class for this banner -->
            <a style="color:#F05732" href="https://pytorch.org/docs/stable/_modules/torch/autograd/profiler.html">
              You are viewing unstable developer preview docs.
              Click here to view docs for latest stable release.
            </a>
          </div>

          <p class="caption"><span class="caption-text">Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/cuda.html">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/windows.html">Windows FAQ</a></li>
</ul>
<p class="caption"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../packages.html">Javadoc</a></li>
</ul>
<p class="caption"><span class="caption-text">Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../amp.html">torch.cuda.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../optim.html">torch.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../rpc/index.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../__config__.html">torch.__config__</a></li>
</ul>
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <!-- Page-level bar: breadcrumb trail on the left, "Shortcuts" toggle on
           the right. Stray blank lines below are template whitespace emitted
           by the docs generator; left untouched. -->
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          















<!-- Breadcrumbs: Docs > Module code > torch > torch.autograd > this page -->
<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="pytorch-breadcrumbs">
    
      <li>
        <a href="../../../index.html">
          
            Docs
          
        </a> &gt;
      </li>

        
          <li><a href="../../index.html">Module code</a> &gt;</li>
        
          <li><a href="../../torch.html">torch</a> &gt;</li>
        
          <li><a href="../autograd.html">torch.autograd</a> &gt;</li>
        
      <li>torch.autograd.profiler</li>
    
    
      <li class="pytorch-breadcrumbs-aside">
        
      </li>
    
  </ul>

  
</div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <h1>Source code for torch.autograd.profiler</h1><div class="highlight"><pre>
<span></span><span class="kn">import</span> <span class="nn">itertools</span>
<span class="kn">import</span> <span class="nn">torch</span>

<span class="kn">from</span> <span class="nn">collections</span> <span class="kn">import</span> <span class="n">defaultdict</span><span class="p">,</span> <span class="n">namedtuple</span>
<span class="kn">from</span> <span class="nn">operator</span> <span class="kn">import</span> <span class="n">attrgetter</span>

<span class="k">try</span><span class="p">:</span>
    <span class="c1"># Available in Python &gt;= 3.2</span>
    <span class="kn">from</span> <span class="nn">contextlib</span> <span class="kn">import</span> <span class="n">ContextDecorator</span>
<span class="k">except</span> <span class="ne">ImportError</span><span class="p">:</span>
    <span class="kn">import</span> <span class="nn">functools</span>

    <span class="k">class</span> <span class="nc">ContextDecorator</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
        <span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">func</span><span class="p">):</span>
            <span class="nd">@functools</span><span class="o">.</span><span class="n">wraps</span><span class="p">(</span><span class="n">func</span><span class="p">)</span>
            <span class="k">def</span> <span class="nf">wrapped</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
                <span class="k">with</span> <span class="bp">self</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>

            <span class="k">return</span> <span class="n">wrapped</span>


<span class="k">class</span> <span class="nc">EventList</span><span class="p">(</span><span class="nb">list</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;A list of Events (for pretty printing)&quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
        <span class="n">use_cuda</span> <span class="o">=</span> <span class="n">kwargs</span><span class="o">.</span><span class="n">pop</span><span class="p">(</span><span class="s1">&#39;use_cuda&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">EventList</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_cpu_children_populated</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_use_cuda</span> <span class="o">=</span> <span class="n">use_cuda</span>

    <span class="k">def</span> <span class="fm">__str__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">table</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">populate_cpu_children</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Populates child events into each underlying FunctionEvent object.</span>
<span class="sd">        One event is a child of another if [s1, e1) is inside [s2, e2). Where</span>
<span class="sd">        s1 and e1 would be start and end of the child event&#39;s interval. And</span>
<span class="sd">        s2 and e2 start and end of the parent event&#39;s interval</span>

<span class="sd">        Example: In event list [[0, 10], [1, 3], [3, 4]] would have make [0, 10]</span>
<span class="sd">        be a parent of two other intervals.</span>

<span class="sd">        If for any reason two intervals intersect only partialy, this function</span>
<span class="sd">        will not record a parent child relationship between then.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">cpu_children_populated</span><span class="p">:</span>
            <span class="k">return</span>
        <span class="n">events</span> <span class="o">=</span> <span class="nb">sorted</span><span class="p">(</span>
            <span class="bp">self</span><span class="p">,</span>
            <span class="n">key</span><span class="o">=</span><span class="n">attrgetter</span><span class="p">(</span><span class="s2">&quot;thread&quot;</span><span class="p">),</span>
        <span class="p">)</span>
        <span class="n">threads</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">groupby</span><span class="p">(</span><span class="n">events</span><span class="p">,</span> <span class="n">key</span><span class="o">=</span><span class="n">attrgetter</span><span class="p">(</span><span class="s2">&quot;thread&quot;</span><span class="p">))</span>

        <span class="c1"># For each thread we keep a stack of current nested parents.</span>
        <span class="c1"># We maintain the invariant that each interval is a subset of all other</span>
        <span class="c1"># intervals lower in the stack.</span>
        <span class="c1">#</span>
        <span class="c1"># First we sort the intervals by their start time. Then we iterate over them.</span>
        <span class="c1"># Every time we see a new interval we remove several parents from</span>
        <span class="c1"># the top until we restore the invariant. Then parent child relationship</span>
        <span class="c1"># if recorded if the stack is not empty.</span>
        <span class="c1"># Finally we add new interval to the list</span>
        <span class="c1">#</span>
        <span class="c1"># Algorithm has O(N * log(N)) complexity where N is number of</span>
        <span class="c1"># intervals</span>
        <span class="k">for</span> <span class="n">thread_id</span><span class="p">,</span> <span class="n">thread_events</span> <span class="ow">in</span> <span class="n">threads</span><span class="p">:</span>
            <span class="n">thread_events</span> <span class="o">=</span> <span class="nb">sorted</span><span class="p">(</span>
                <span class="n">thread_events</span><span class="p">,</span>
                <span class="n">key</span><span class="o">=</span><span class="k">lambda</span> <span class="n">event</span><span class="p">:</span> <span class="p">[</span><span class="n">event</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span><span class="p">,</span> <span class="o">-</span><span class="n">event</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">end</span><span class="p">],</span>
            <span class="p">)</span>
            <span class="n">current_events</span> <span class="o">=</span> <span class="p">[]</span>
            <span class="n">cur_end</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">for</span> <span class="n">event</span> <span class="ow">in</span> <span class="n">thread_events</span><span class="p">:</span>
                <span class="k">while</span> <span class="nb">len</span><span class="p">(</span><span class="n">current_events</span><span class="p">)</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
                    <span class="n">parent</span> <span class="o">=</span> <span class="n">current_events</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
                    <span class="k">if</span> <span class="n">event</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span> <span class="o">&gt;=</span> <span class="n">parent</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">end</span> <span class="ow">or</span> \
                            <span class="n">event</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">end</span> <span class="o">&gt;</span> <span class="n">parent</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">end</span><span class="p">:</span>
                        <span class="c1"># this can&#39;t be a parent</span>
                        <span class="n">current_events</span><span class="o">.</span><span class="n">pop</span><span class="p">()</span>
                    <span class="k">else</span><span class="p">:</span>
                        <span class="n">parent</span><span class="o">.</span><span class="n">append_cpu_child</span><span class="p">(</span><span class="n">event</span><span class="p">)</span>
                        <span class="k">break</span>

                <span class="n">current_events</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">event</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">_cpu_children_populated</span> <span class="o">=</span> <span class="kc">True</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">self_cpu_time_total</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="nb">sum</span><span class="p">([</span><span class="n">event</span><span class="o">.</span><span class="n">self_cpu_time_total</span> <span class="k">for</span> <span class="n">event</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">])</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">cpu_children_populated</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_cpu_children_populated</span>

    <span class="k">def</span> <span class="nf">table</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">sort_by</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">row_limit</span><span class="o">=</span><span class="mi">100</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Prints an EventList as a nicely formatted table.</span>

<span class="sd">        Arguments:</span>
<span class="sd">            sort_by (str, optional): Attribute used to sort entries. By default</span>
<span class="sd">                they are printed in the same order as they were registered.</span>
<span class="sd">                Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,</span>
<span class="sd">                ``cuda_time_total``, ``count``.</span>

<span class="sd">        Returns:</span>
<span class="sd">            A string containing the table.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">return</span> <span class="n">build_table</span><span class="p">(</span>
            <span class="bp">self</span><span class="p">,</span> <span class="n">sort_by</span><span class="o">=</span><span class="n">sort_by</span><span class="p">,</span> <span class="n">row_limit</span><span class="o">=</span><span class="n">row_limit</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="n">header</span><span class="p">,</span> <span class="n">use_cuda</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_use_cuda</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">export_chrome_trace</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Exports an EventList as a Chrome tracing tools file.</span>

<span class="sd">        The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL.</span>

<span class="sd">        Arguments:</span>
<span class="sd">            path (str): Path where the trace will be written.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="kn">import</span> <span class="nn">os</span>
        <span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="s1">&#39;w&#39;</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
            <span class="n">chrome_events</span> <span class="o">=</span> <span class="p">[]</span>
            <span class="n">next_id</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="c1"># Use file IO over using json.dump since JSON dumping is very slow and</span>
            <span class="c1"># this technique is proven to give a 4x speedup.</span>
            <span class="n">f</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s2">&quot;[&quot;</span><span class="p">)</span>
            <span class="k">for</span> <span class="n">evt</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">:</span>
                <span class="n">f</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s1">&#39;{&quot;name&quot;: &quot;</span><span class="si">%s</span><span class="s1">&quot;, &#39;</span>
                        <span class="s1">&#39;&quot;ph&quot;: &quot;X&quot;, &#39;</span>
                        <span class="s1">&#39;&quot;ts&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                        <span class="s1">&#39;&quot;dur&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                        <span class="s1">&#39;&quot;tid&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                        <span class="s1">&#39;&quot;pid&quot;: &quot;CPU functions&quot;, &#39;</span>
                        <span class="s1">&#39;&quot;args&quot;: </span><span class="si">{}</span><span class="s1">}, &#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">evt</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span><span class="p">,</span>
                                           <span class="n">evt</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">elapsed_us</span><span class="p">(),</span> <span class="n">evt</span><span class="o">.</span><span class="n">thread</span><span class="p">))</span>
                <span class="k">for</span> <span class="n">k</span> <span class="ow">in</span> <span class="n">evt</span><span class="o">.</span><span class="n">kernels</span><span class="p">:</span>
                    <span class="c1"># &#39;s&#39; and &#39;f&#39; draw Flow arrows from</span>
                    <span class="c1"># the CPU launch to the GPU kernel</span>
                    <span class="n">f</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s1">&#39;{&quot;name&quot;: &quot;</span><span class="si">%s</span><span class="s1">&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;ph&quot;: &quot;s&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;ts&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;tid&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;pid&quot;: &quot;CPU functions&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;id&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;cat&quot;: &quot;cpu_to_cuda&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;args&quot;: </span><span class="si">{}</span><span class="s1">}, &#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">evt</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span><span class="p">,</span>
                                               <span class="n">evt</span><span class="o">.</span><span class="n">thread</span><span class="p">,</span> <span class="n">next_id</span><span class="p">))</span>
                    <span class="n">f</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s1">&#39;{&quot;name&quot;: &quot;</span><span class="si">%s</span><span class="s1">&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;ph&quot;: &quot;f&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;ts&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;tid&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;pid&quot;: &quot;CUDA functions&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;id&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;cat&quot;: &quot;cpu_to_cuda&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;args&quot;: </span><span class="si">{}</span><span class="s1">}, &#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">k</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">k</span><span class="o">.</span><span class="n">interval</span><span class="o">.</span><span class="n">start</span><span class="p">,</span> <span class="n">k</span><span class="o">.</span><span class="n">device</span><span class="p">,</span> <span class="n">next_id</span><span class="p">))</span>
                    <span class="n">f</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s1">&#39;{&quot;name&quot;: &quot;</span><span class="si">%s</span><span class="s1">&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;ph&quot;: &quot;X&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;ts&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;dur&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;tid&quot;: </span><span class="si">%s</span><span class="s1">, &#39;</span>
                            <span class="s1">&#39;&quot;pid&quot;: &quot;CUDA functions&quot;, &#39;</span>
                            <span class="s1">&#39;&quot;args&quot;: </span><span class="si">{}</span><span class="s1">}, &#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">k</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">k</span><span class="o">.</span><span class="n">interval</span><span class="o">.</span><span class="n">start</span><span class="p">,</span>
                                               <span class="n">k</span><span class="o">.</span><span class="n">interval</span><span class="o">.</span><span class="n">elapsed_us</span><span class="p">(),</span> <span class="n">k</span><span class="o">.</span><span class="n">device</span><span class="p">))</span>
                    <span class="n">next_id</span> <span class="o">+=</span> <span class="mi">1</span>

            <span class="c1"># remove trailing whitespace and comma</span>
            <span class="n">f</span><span class="o">.</span><span class="n">seek</span><span class="p">(</span><span class="n">f</span><span class="o">.</span><span class="n">tell</span><span class="p">()</span> <span class="o">-</span> <span class="mi">2</span><span class="p">,</span> <span class="n">os</span><span class="o">.</span><span class="n">SEEK_SET</span><span class="p">)</span>
            <span class="n">f</span><span class="o">.</span><span class="n">truncate</span><span class="p">()</span>
            <span class="n">f</span><span class="o">.</span><span class="n">write</span><span class="p">(</span><span class="s2">&quot;]&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">key_averages</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">group_by_input_shapes</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Averages all function events over their keys.</span>

<span class="sd">        @param group_by_input_shapes The key would become</span>
<span class="sd">        (event name, input dimensions) rather than just event name.</span>
<span class="sd">        This is useful to see which dimensionality contributes to the runtime</span>
<span class="sd">        the most and may help with dimension specific optimizations or</span>
<span class="sd">        choosing best candidates for quantization (aka fitting a roof line)</span>

<span class="sd">        Returns:</span>
<span class="sd">            An EventList containing FunctionEventAvg objects.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">populate_cpu_children</span><span class="p">()</span>
        <span class="n">stats</span> <span class="o">=</span> <span class="n">defaultdict</span><span class="p">(</span><span class="n">FunctionEventAvg</span><span class="p">)</span>

        <span class="k">def</span> <span class="nf">get_key</span><span class="p">(</span><span class="n">event</span><span class="p">,</span> <span class="n">group_by_input_shapes</span><span class="p">):</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="n">group_by_input_shapes</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">event</span><span class="o">.</span><span class="n">key</span>
            <span class="k">return</span> <span class="p">(</span><span class="n">event</span><span class="o">.</span><span class="n">key</span><span class="p">,</span> <span class="nb">str</span><span class="p">(</span><span class="n">event</span><span class="o">.</span><span class="n">input_shapes</span><span class="p">))</span>
        <span class="k">for</span> <span class="n">evt</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">:</span>
            <span class="n">stats</span><span class="p">[</span><span class="n">get_key</span><span class="p">(</span><span class="n">evt</span><span class="p">,</span> <span class="n">group_by_input_shapes</span><span class="p">)]</span><span class="o">.</span><span class="n">add</span><span class="p">(</span>
                <span class="n">evt</span><span class="p">,</span> <span class="n">group_by_input_shapes</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">EventList</span><span class="p">(</span><span class="n">stats</span><span class="o">.</span><span class="n">values</span><span class="p">(),</span> <span class="n">use_cuda</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_use_cuda</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">total_average</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Averages all events.</span>

<span class="sd">        Returns:</span>
<span class="sd">            A FunctionEventAvg object.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">total_stat</span> <span class="o">=</span> <span class="n">FunctionEventAvg</span><span class="p">()</span>
        <span class="k">for</span> <span class="n">evt</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">:</span>
            <span class="n">total_stat</span> <span class="o">+=</span> <span class="n">evt</span>
            <span class="n">total_stat</span><span class="o">.</span><span class="n">key</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="n">total_stat</span><span class="o">.</span><span class="n">key</span> <span class="o">=</span> <span class="s1">&#39;Total&#39;</span>
        <span class="k">return</span> <span class="n">total_stat</span>


<div class="viewcode-block" id="profile"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.profile">[docs]</a><span class="k">class</span> <span class="nc">profile</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Context manager that manages autograd profiler state and holds a summary of results.</span>
<span class="sd">    Under the hood it just records events of functions being executed in C++ and</span>
<span class="sd">    exposes those events to Python. You can wrap any code into it and it will</span>
<span class="sd">    only report runtime of PyTorch functions.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        enabled (bool, optional): Setting this to False makes this context manager a no-op.</span>
<span class="sd">            Default: ``True``.</span>

<span class="sd">        use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.</span>
<span class="sd">            Adds approximately 4us of overhead to each tensor operation.</span>
<span class="sd">            Default: ``False``</span>

<span class="sd">        record_shapes (bool, optional): If shapes recording is set, information</span>
<span class="sd">            about input dimensions will be collected. This allows one to see which</span>
<span class="sd">            dimensions have been used under the hood and further group by them</span>
<span class="sd">            using prof.key_averages(group_by_input_shape=True). Please note that</span>
<span class="sd">            shape recording might skew your profiling data. It is recommended to</span>
<span class="sd">            use separate runs with and without shape recording to validate the timing.</span>
<span class="sd">            Most likely the skew will be negligible for bottom most events (in a case</span>
<span class="sd">            of nested function calls). But for higher level functions the total</span>
<span class="sd">            self cpu time might be artificially increased because of the shape</span>
<span class="sd">            collection.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        This context manager should not be called recursively, i.e. at most one</span>
<span class="sd">        instance should be enabled at any given time.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),</span>
<span class="sd">        one cannot use the profiler with ``use_cuda = True`` to benchmark</span>
<span class="sd">        DataLoaders with ``num_workers &gt; 0``. If you wish to benchmark data loading,</span>
<span class="sd">        please use ``use_cuda = False`` or ``num_workers = 0``.</span>

<span class="sd">    Example:</span>
<span class="sd">        &gt;&gt;&gt; x = torch.randn((1, 1), requires_grad=True)</span>
<span class="sd">        &gt;&gt;&gt; with torch.autograd.profiler.profile() as prof:</span>
<span class="sd">        &gt;&gt;&gt;     for _ in range(100):  # any normal python code, really!</span>
<span class="sd">        &gt;&gt;&gt;         y = x ** 2</span>
<span class="sd">        &gt;&gt;          y.backward()</span>
<span class="sd">        &gt;&gt;&gt; # NOTE: some columns were removed for brevity</span>
<span class="sd">        &gt;&gt;&gt; print(prof.key_averages().table(sort_by=&quot;self_cpu_time_total&quot;))</span>
<span class="sd">        -----------------------------------  ---------------  ---------------  ---------------</span>
<span class="sd">        Name                                 Self CPU total   CPU time avg     Number of Calls</span>
<span class="sd">        -----------------------------------  ---------------  ---------------  ---------------</span>
<span class="sd">        mul                                  32.048ms         32.048ms         200</span>
<span class="sd">        pow                                  27.041ms         27.041ms         200</span>
<span class="sd">        PowBackward0                         9.727ms          55.483ms         100</span>
<span class="sd">        torch::autograd::AccumulateGrad      9.148ms          9.148ms          100</span>
<span class="sd">        torch::autograd::GraphRoot           691.816us        691.816us        100</span>
<span class="sd">        -----------------------------------  ---------------  ---------------  ---------------</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">enabled</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">use_cuda</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">record_shapes</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span> <span class="o">=</span> <span class="n">enabled</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">use_cuda</span> <span class="o">=</span> <span class="n">use_cuda</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span><span class="p">:</span>
            <span class="k">return</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">entered</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">record_shapes</span> <span class="o">=</span> <span class="n">record_shapes</span>

    <span class="k">def</span> <span class="fm">__enter__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span><span class="p">:</span>
            <span class="k">return</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">entered</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;autograd profiler traces are not reentrant&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">entered</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="n">profiler_kind</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">ProfilerState</span><span class="o">.</span><span class="n">CUDA</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">use_cuda</span> \
            <span class="k">else</span> <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">ProfilerState</span><span class="o">.</span><span class="n">CPU</span>
        <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">_enable_profiler</span><span class="p">(</span>
            <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">ProfilerConfig</span><span class="p">(</span><span class="n">profiler_kind</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">record_shapes</span><span class="p">))</span>
        <span class="k">return</span> <span class="bp">self</span>

    <span class="k">def</span> <span class="fm">__exit__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">exc_type</span><span class="p">,</span> <span class="n">exc_val</span><span class="p">,</span> <span class="n">exc_tb</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span><span class="p">:</span>
            <span class="k">return</span>
        <span class="n">records</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">_disable_profiler</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span> <span class="o">=</span> <span class="n">EventList</span><span class="p">(</span><span class="n">parse_cpu_trace</span><span class="p">(</span><span class="n">records</span><span class="p">),</span> <span class="n">use_cuda</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">use_cuda</span><span class="p">)</span>
        <span class="k">return</span> <span class="kc">False</span>

    <span class="k">def</span> <span class="fm">__repr__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">return</span> <span class="s1">&#39;&lt;unfinished torch.autograd.profile&gt;&#39;</span>
        <span class="k">return</span> <span class="nb">repr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="p">)</span>

    <span class="k">def</span> <span class="fm">__str__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">return</span> <span class="s1">&#39;&lt;unfinished torch.autograd.profile&gt;&#39;</span>
        <span class="k">return</span> <span class="nb">str</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">_check_finish</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;can&#39;t export a trace that didn&#39;t finish running&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="o">.</span><span class="n">populate_cpu_children</span><span class="p">()</span>

<div class="viewcode-block" id="profile.table"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.profile.table">[docs]</a>    <span class="k">def</span> <span class="nf">table</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">sort_by</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">row_limit</span><span class="o">=</span><span class="mi">100</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_finish</span><span class="p">()</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="o">.</span><span class="n">table</span><span class="p">(</span>
            <span class="n">sort_by</span><span class="o">=</span><span class="n">sort_by</span><span class="p">,</span> <span class="n">row_limit</span><span class="o">=</span><span class="n">row_limit</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="n">header</span><span class="p">)</span></div>
    <span class="n">table</span><span class="o">.</span><span class="vm">__doc__</span> <span class="o">=</span> <span class="n">EventList</span><span class="o">.</span><span class="n">table</span><span class="o">.</span><span class="vm">__doc__</span>

<div class="viewcode-block" id="profile.export_chrome_trace"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.profile.export_chrome_trace">[docs]</a>    <span class="k">def</span> <span class="nf">export_chrome_trace</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_finish</span><span class="p">()</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="o">.</span><span class="n">export_chrome_trace</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div>
    <span class="n">export_chrome_trace</span><span class="o">.</span><span class="vm">__doc__</span> <span class="o">=</span> <span class="n">EventList</span><span class="o">.</span><span class="n">export_chrome_trace</span><span class="o">.</span><span class="vm">__doc__</span>

<div class="viewcode-block" id="profile.key_averages"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.profile.key_averages">[docs]</a>    <span class="k">def</span> <span class="nf">key_averages</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">group_by_input_shape</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_finish</span><span class="p">()</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="o">.</span><span class="n">key_averages</span><span class="p">(</span><span class="n">group_by_input_shape</span><span class="p">)</span></div>
    <span class="n">key_averages</span><span class="o">.</span><span class="vm">__doc__</span> <span class="o">=</span> <span class="n">EventList</span><span class="o">.</span><span class="n">key_averages</span><span class="o">.</span><span class="vm">__doc__</span>

<div class="viewcode-block" id="profile.total_average"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.profile.total_average">[docs]</a>    <span class="k">def</span> <span class="nf">total_average</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_finish</span><span class="p">()</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="o">.</span><span class="n">total_average</span><span class="p">()</span></div>
    <span class="n">total_average</span><span class="o">.</span><span class="vm">__doc__</span> <span class="o">=</span> <span class="n">EventList</span><span class="o">.</span><span class="n">total_average</span><span class="o">.</span><span class="vm">__doc__</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">self_cpu_time_total</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot; Returns total time spent on CPU obtained as a sum of</span>
<span class="sd">        all self times across all the events.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_finish</span><span class="p">()</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">function_events</span><span class="o">.</span><span class="n">self_cpu_time_total</span></div>


<div class="viewcode-block" id="record_function"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.record_function">[docs]</a><span class="k">class</span> <span class="nc">record_function</span><span class="p">(</span><span class="n">ContextDecorator</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Context manager/function decorator that adds a label to a block of</span>
<span class="sd">    Python code (or function) when running autograd profiler. It is</span>
<span class="sd">    useful when tracing the code profile.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        name (str): Label assigned to the block of code.</span>

<span class="sd">    Example:</span>
<span class="sd">        &gt;&gt;&gt; x = torch.randn((1, 1), requires_grad=True)</span>
<span class="sd">        &gt;&gt;&gt; with torch.autograd.profiler.profile() as prof:</span>
<span class="sd">        ...     y = x ** 2</span>
<span class="sd">        ...     with torch.autograd.profiler.record_function(&quot;label-z&quot;): # label the block</span>
<span class="sd">        ...         z = y ** 3</span>
<span class="sd">        ...     y.backward()</span>
<span class="sd">        ...</span>
<span class="sd">        &gt;&gt;&gt; # NOTE: some columns were removed for brevity</span>
<span class="sd">        &gt;&gt;&gt; print(prof.key_averages().table(sort_by=&quot;self_cpu_time_total&quot;))</span>
<span class="sd">        -----------------------------------  ---------------  ---------------  ---------------</span>
<span class="sd">        Name                                 Self CPU total %  CPU time avg     Number of Calls</span>
<span class="sd">        -----------------------------------  ---------------  ---------------  ---------------</span>
<span class="sd">        pow                                  60.77%           47.470us         3</span>
<span class="sd">        mul                                  21.73%           25.465us         2</span>
<span class="sd">        PowBackward0                         12.03%           121.891us        1</span>
<span class="sd">        torch::autograd::AccumulateGrad      2.70%            6.324us          1</span>
<span class="sd">        label-z                              2.13%            12.421us         1</span>
<span class="sd">        torch::autograd::GraphRoot           0.64%            1.503us          1</span>
<span class="sd">        -----------------------------------  ---------------  ---------------  ---------------</span>
<span class="sd">        Self CPU time total: 234.344us</span>
<span class="sd">        CUDA time total: 0.000us</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">name</span> <span class="o">=</span> <span class="n">name</span>

    <span class="k">def</span> <span class="fm">__enter__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">handle</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ops</span><span class="o">.</span><span class="n">profiler</span><span class="o">.</span><span class="n">_record_function_enter</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="fm">__exit__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">):</span>
        <span class="n">torch</span><span class="o">.</span><span class="n">ops</span><span class="o">.</span><span class="n">profiler</span><span class="o">.</span><span class="n">_record_function_exit</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">handle</span><span class="p">)</span>
        <span class="k">return</span> <span class="kc">False</span></div>


<div class="viewcode-block" id="emit_nvtx"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.emit_nvtx">[docs]</a><span class="k">class</span> <span class="nc">emit_nvtx</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Context manager that makes every autograd operation emit an NVTX range.</span>

<span class="sd">    It is useful when running the program under nvprof::</span>

<span class="sd">        nvprof --profile-from-start off -o trace_name.prof -- &lt;regular command here&gt;</span>

<span class="sd">    Unfortunately, there&#39;s no way to force nvprof to flush the data it collected</span>
<span class="sd">    to disk, so for CUDA profiling one has to use this context manager to annotate</span>
<span class="sd">    nvprof traces and wait for the process to exit before inspecting them.</span>
<span class="sd">    Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or</span>
<span class="sd">    :func:`torch.autograd.profiler.load_nvprof` can load the results for inspection</span>
<span class="sd">    e.g. in Python REPL.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        This context manager should not be called recursively, i.e. at most one</span>
<span class="sd">        instance should be enabled at any given time.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        enabled (bool, optional, default=True): Setting ``enabled=False`` makes this context manager a no-op.</span>
<span class="sd">            Default: ``True``.</span>
<span class="sd">        record_shapes (bool, optional, default=False): If ``record_shapes=True``, the nvtx range wrapping</span>
<span class="sd">            each autograd op will append information about the sizes of Tensor arguments received</span>
<span class="sd">            by that op, in the following format:</span>
<span class="sd">            ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``</span>
<span class="sd">            Non-tensor arguments will be represented by ``[]``.</span>
<span class="sd">            Arguments will be listed in the order they are received by the backend op.</span>
<span class="sd">            Please note that this order may not match the order in which those arguments were passed</span>
<span class="sd">            on the Python side.  Also note that shape recording may increase the overhead of nvtx range creation.</span>

<span class="sd">    Example:</span>
<span class="sd">        &gt;&gt;&gt; with torch.cuda.profiler.profile():</span>
<span class="sd">        ...     model(x) # Warmup CUDA memory allocator and profiler</span>
<span class="sd">        ...     with torch.autograd.profiler.emit_nvtx():</span>
<span class="sd">        ...         model(x)</span>

<span class="sd">    **Forward-backward correlation**</span>

<span class="sd">    When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,</span>
<span class="sd">    correlating each backward-pass op with the corresponding forward-pass op can be difficult.</span>
<span class="sd">    To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it</span>
<span class="sd">    generates.</span>

<span class="sd">    During the forward pass, each function range is decorated with ``seq=&lt;N&gt;``.  ``seq`` is a running</span>
<span class="sd">    counter, incremented each time a new backward Function object is created and stashed for backward.</span>
<span class="sd">    Thus, the ``seq=&lt;N&gt;`` annotation associated with each forward function range tells you that</span>
<span class="sd">    if a backward Function object is created by this forward function,</span>
<span class="sd">    the backward object will receive sequence number N.</span>
<span class="sd">    During the backward pass, the top-level range wrapping each C++ backward Function&#39;s</span>
<span class="sd">    ``apply()`` call is decorated with ``stashed seq=&lt;M&gt;``.  ``M`` is the sequence number that</span>
<span class="sd">    the backward object was created with.  By comparing ``stashed seq`` numbers in backward with ``seq``</span>
<span class="sd">    numbers in forward, you can track down which forward op created each backward Function.</span>

<span class="sd">    Any functions executed during the backward pass are also decorated with ``seq=&lt;N&gt;``.  During</span>
<span class="sd">    default backward (with ``create_graph=False``) this information is irrelevant, and in fact,</span>
<span class="sd">    ``N`` may simply be 0 for all such functions.  Only the top-level ranges associated with</span>
<span class="sd">    backward Function objects&#39; ``apply()`` methods are useful, as a way to correlate these Function</span>
<span class="sd">    objects with the earlier forward pass.</span>

<span class="sd">    **Double-backward**</span>

<span class="sd">    If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,</span>
<span class="sd">    if you are setting up for a double-backward), each function&#39;s execution during backward</span>
<span class="sd">    is given a nonzero, useful ``seq=&lt;N&gt;``.  Those functions may themselves create Function objects</span>
<span class="sd">    to be executed later during double-backward, just as the original functions in the forward pass did.</span>
<span class="sd">    The relationship between backward and double-backward is conceptually the same as the relationship</span>
<span class="sd">    between forward and backward: The functions still emit current-sequence-number-tagged ranges,</span>
<span class="sd">    the Function objects they create still stash those sequence numbers, and during the eventual</span>
<span class="sd">    double-backward, the Function objects&#39; ``apply()`` ranges are still tagged with ``stashed seq``</span>
<span class="sd">    numbers, which can be compared to ``seq`` numbers from the backward pass.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        The sequence number is thread-local, and some forward functions don&#39;t create an associated</span>
<span class="sd">        backward Function object (instead delegating that to sub-functions further down the call chain).</span>
<span class="sd">        For these reasons, the correspondence of stashed sequence numbers in</span>
<span class="sd">        backward Function ``apply()`` ranges with ``seq`` numbers in forward-pass ranges is</span>
<span class="sd">        not guaranteed to be 1 to 1.  The sequence numbers alone may not be enough to fully</span>
<span class="sd">        disambiguate which forward function created which</span>
<span class="sd">        backward Function object.  You may need to make a judgment based on analytic knowledge of what</span>
<span class="sd">        the expected correspondence should be.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">enabled</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">record_shapes</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span> <span class="o">=</span> <span class="n">enabled</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">entered</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">record_shapes</span> <span class="o">=</span> <span class="n">record_shapes</span>

    <span class="k">def</span> <span class="fm">__enter__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span><span class="p">:</span>
            <span class="k">return</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">entered</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;NVTX annotation context manager is not reentrant&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">entered</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">synchronize</span><span class="p">()</span>
        <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">_enable_profiler</span><span class="p">(</span>
            <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">ProfilerConfig</span><span class="p">(</span>
                <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">ProfilerState</span><span class="o">.</span><span class="n">NVTX</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">record_shapes</span>
            <span class="p">)</span>
        <span class="p">)</span>
        <span class="k">return</span> <span class="bp">self</span>

    <span class="k">def</span> <span class="fm">__exit__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">exc_type</span><span class="p">,</span> <span class="n">exc_val</span><span class="p">,</span> <span class="n">exc_tb</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">enabled</span><span class="p">:</span>
            <span class="k">return</span>
        <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">synchronize</span><span class="p">()</span>
        <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">_disable_profiler</span><span class="p">()</span>
        <span class="k">return</span> <span class="kc">False</span></div>


<div class="viewcode-block" id="load_nvprof"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.profiler.load_nvprof">[docs]</a><span class="k">def</span> <span class="nf">load_nvprof</span><span class="p">(</span><span class="n">path</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Opens an nvprof trace file and parses autograd annotations.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        path (str): path to nvprof trace</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">return</span> <span class="n">EventList</span><span class="p">(</span><span class="n">parse_nvprof_trace</span><span class="p">(</span><span class="n">path</span><span class="p">))</span></div>


<span class="c1">################################################################################</span>
<span class="c1"># FunctionEvent</span>

<span class="k">def</span> <span class="nf">format_time</span><span class="p">(</span><span class="n">time_us</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Defines how to format time in FunctionEvent&quot;&quot;&quot;</span>
    <span class="n">US_IN_SECOND</span> <span class="o">=</span> <span class="mf">1000.0</span> <span class="o">*</span> <span class="mf">1000.0</span>
    <span class="n">US_IN_MS</span> <span class="o">=</span> <span class="mf">1000.0</span>
    <span class="k">if</span> <span class="n">time_us</span> <span class="o">&gt;=</span> <span class="n">US_IN_SECOND</span><span class="p">:</span>
        <span class="k">return</span> <span class="s1">&#39;</span><span class="si">{:.3f}</span><span class="s1">s&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">time_us</span> <span class="o">/</span> <span class="n">US_IN_SECOND</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">time_us</span> <span class="o">&gt;=</span> <span class="n">US_IN_MS</span><span class="p">:</span>
        <span class="k">return</span> <span class="s1">&#39;</span><span class="si">{:.3f}</span><span class="s1">ms&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">time_us</span> <span class="o">/</span> <span class="n">US_IN_MS</span><span class="p">)</span>
    <span class="k">return</span> <span class="s1">&#39;</span><span class="si">{:.3f}</span><span class="s1">us&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">time_us</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">format_time_share</span><span class="p">(</span><span class="n">time_us</span><span class="p">,</span> <span class="n">total_time_us</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Defines how to format time in FunctionEvent&quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">total_time_us</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
        <span class="k">assert</span><span class="p">(</span><span class="n">time_us</span> <span class="o">==</span> <span class="mi">0</span><span class="p">)</span>
        <span class="k">return</span> <span class="s2">&quot;NaN&quot;</span>
    <span class="k">return</span> <span class="s1">&#39;</span><span class="si">{:.2f}</span><span class="s1">%&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">time_us</span> <span class="o">*</span> <span class="mf">100.0</span> <span class="o">/</span> <span class="n">total_time_us</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">attr_formatter</span><span class="p">(</span><span class="n">name</span><span class="p">):</span>
    <span class="k">return</span> <span class="nb">property</span><span class="p">(</span><span class="k">lambda</span> <span class="bp">self</span><span class="p">:</span> <span class="n">format_time</span><span class="p">(</span><span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">)))</span>


<span class="k">class</span> <span class="nc">FormattedTimesMixin</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Helpers for FunctionEvent and FunctionEventAvg.</span>

<span class="sd">    The subclass should define `*_time_total` and `count` attributes.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">cpu_time_str</span> <span class="o">=</span> <span class="n">attr_formatter</span><span class="p">(</span><span class="s1">&#39;cpu_time&#39;</span><span class="p">)</span>
    <span class="n">cuda_time_str</span> <span class="o">=</span> <span class="n">attr_formatter</span><span class="p">(</span><span class="s1">&#39;cuda_time&#39;</span><span class="p">)</span>
    <span class="n">cpu_time_total_str</span> <span class="o">=</span> <span class="n">attr_formatter</span><span class="p">(</span><span class="s1">&#39;cpu_time_total&#39;</span><span class="p">)</span>
    <span class="n">cuda_time_total_str</span> <span class="o">=</span> <span class="n">attr_formatter</span><span class="p">(</span><span class="s1">&#39;cuda_time_total&#39;</span><span class="p">)</span>
    <span class="n">self_cpu_time_total_str</span> <span class="o">=</span> <span class="n">attr_formatter</span><span class="p">(</span><span class="s1">&#39;self_cpu_time_total&#39;</span><span class="p">)</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">cpu_time</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="mf">0.0</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">count</span> <span class="o">==</span> <span class="mi">0</span> <span class="k">else</span> <span class="mf">1.0</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">cpu_time_total</span> <span class="o">/</span> <span class="bp">self</span><span class="o">.</span><span class="n">count</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">cuda_time</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="mf">0.0</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">count</span> <span class="o">==</span> <span class="mi">0</span> <span class="k">else</span> <span class="mf">1.0</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">cuda_time_total</span> <span class="o">/</span> <span class="bp">self</span><span class="o">.</span><span class="n">count</span>


<span class="k">class</span> <span class="nc">Interval</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">start</span><span class="p">,</span> <span class="n">end</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">start</span> <span class="o">=</span> <span class="n">start</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">end</span> <span class="o">=</span> <span class="n">end</span>

    <span class="k">def</span> <span class="nf">elapsed_us</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">end</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">start</span>


<span class="n">Kernel</span> <span class="o">=</span> <span class="n">namedtuple</span><span class="p">(</span><span class="s1">&#39;Kernel&#39;</span><span class="p">,</span> <span class="p">[</span><span class="s1">&#39;name&#39;</span><span class="p">,</span> <span class="s1">&#39;device&#39;</span><span class="p">,</span> <span class="s1">&#39;interval&#39;</span><span class="p">])</span>


<span class="c1"># TODO: record TID too</span>
<span class="k">class</span> <span class="nc">FunctionEvent</span><span class="p">(</span><span class="n">FormattedTimesMixin</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Profiling information about a single function.&quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">id</span><span class="p">,</span> <span class="n">name</span><span class="p">,</span> <span class="n">thread</span><span class="p">,</span> <span class="n">cpu_start</span><span class="p">,</span> <span class="n">cpu_end</span><span class="p">,</span> <span class="n">input_shapes</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">id</span> <span class="o">=</span> <span class="nb">id</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">name</span> <span class="o">=</span> <span class="n">name</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cpu_interval</span> <span class="o">=</span> <span class="n">Interval</span><span class="p">(</span><span class="n">cpu_start</span><span class="p">,</span> <span class="n">cpu_end</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">thread</span> <span class="o">=</span> <span class="n">thread</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernels</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">count</span> <span class="o">=</span> <span class="mi">1</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cpu_children</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">input_shapes</span> <span class="o">=</span> <span class="n">input_shapes</span>

    <span class="k">def</span> <span class="nf">append_kernel</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">,</span> <span class="n">device</span><span class="p">,</span> <span class="n">start</span><span class="p">,</span> <span class="n">end</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernels</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">Kernel</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">device</span><span class="p">,</span> <span class="n">Interval</span><span class="p">(</span><span class="n">start</span><span class="p">,</span> <span class="n">end</span><span class="p">)))</span>

    <span class="k">def</span> <span class="nf">append_cpu_child</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">child</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Append a CPU child of type FunctionEvent.</span>

<span class="sd">        One is supposed to append only direct children to the event to have</span>
<span class="sd">        correct self cpu time being reported.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">child</span><span class="p">,</span> <span class="n">FunctionEvent</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cpu_children</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">child</span><span class="p">)</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">self_cpu_time_total</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">cpu_time_total</span> <span class="o">-</span> <span class="nb">sum</span><span class="p">(</span>
            <span class="p">[</span><span class="n">child</span><span class="o">.</span><span class="n">cpu_time_total</span> <span class="k">for</span> <span class="n">child</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">cpu_children</span><span class="p">]</span>
        <span class="p">)</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">cuda_time_total</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="nb">sum</span><span class="p">(</span><span class="n">kinfo</span><span class="o">.</span><span class="n">interval</span><span class="o">.</span><span class="n">elapsed_us</span><span class="p">()</span> <span class="k">for</span> <span class="n">kinfo</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernels</span><span class="p">)</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">cpu_time_total</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">elapsed_us</span><span class="p">()</span>

    <span class="nd">@property</span>
    <span class="k">def</span> <span class="nf">key</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span>

    <span class="k">def</span> <span class="fm">__repr__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="p">(</span>
            <span class="s1">&#39;&lt;FunctionEvent id=</span><span class="si">{}</span><span class="s1"> cpu_time=</span><span class="si">{}</span><span class="s1"> cpu_start=</span><span class="si">{}</span><span class="s1"> cpu_end=</span><span class="si">{}</span><span class="s1"> &#39;</span>
            <span class="s1">&#39;cpu_children=</span><span class="si">{}</span><span class="s1"> cuda_time=</span><span class="si">{}</span><span class="s1"> name=</span><span class="si">{}</span><span class="s1"> thread=</span><span class="si">{}</span><span class="s1"> input_shapes=</span><span class="si">{}</span><span class="s1">&gt;&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">id</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">cpu_time_str</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">end</span><span class="p">,</span>
                <span class="nb">str</span><span class="p">([</span><span class="n">child</span><span class="o">.</span><span class="n">id</span> <span class="k">for</span> <span class="n">child</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">cpu_children</span><span class="p">]),</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">cuda_time_str</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">thread</span><span class="p">,</span>
                <span class="nb">str</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">input_shapes</span><span class="p">),</span>
            <span class="p">)</span>
        <span class="p">)</span>


<span class="k">class</span> <span class="nc">FunctionEventAvg</span><span class="p">(</span><span class="n">FormattedTimesMixin</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Used to average stats over multiple FunctionEvent objects.&quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">key</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">count</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cpu_time_total</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cuda_time_total</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">self_cpu_time_total</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">input_shapes</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="k">def</span> <span class="nf">add</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">other</span><span class="p">,</span> <span class="n">group_by_input_shapes</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">key</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">key</span> <span class="o">=</span> <span class="n">other</span><span class="o">.</span><span class="n">key</span>
            <span class="k">if</span> <span class="n">group_by_input_shapes</span><span class="p">:</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">input_shapes</span> <span class="o">=</span> <span class="n">other</span><span class="o">.</span><span class="n">input_shapes</span>

        <span class="k">assert</span> <span class="p">(</span>
            <span class="ow">not</span> <span class="n">group_by_input_shapes</span> <span class="ow">or</span>
            <span class="n">other</span><span class="o">.</span><span class="n">input_shapes</span> <span class="o">==</span> <span class="bp">self</span><span class="o">.</span><span class="n">input_shapes</span>
        <span class="p">)</span>
        <span class="k">assert</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">other</span><span class="p">,</span> <span class="p">(</span><span class="n">FunctionEvent</span><span class="p">,</span> <span class="n">FunctionEventAvg</span><span class="p">))</span>
        <span class="k">assert</span> <span class="n">other</span><span class="o">.</span><span class="n">key</span> <span class="o">==</span> <span class="bp">self</span><span class="o">.</span><span class="n">key</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cpu_time_total</span> <span class="o">+=</span> <span class="n">other</span><span class="o">.</span><span class="n">cpu_time_total</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cuda_time_total</span> <span class="o">+=</span> <span class="n">other</span><span class="o">.</span><span class="n">cuda_time_total</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">self_cpu_time_total</span> <span class="o">+=</span> <span class="n">other</span><span class="o">.</span><span class="n">self_cpu_time_total</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">count</span> <span class="o">+=</span> <span class="n">other</span><span class="o">.</span><span class="n">count</span>
        <span class="k">return</span> <span class="bp">self</span>

    <span class="k">def</span> <span class="fm">__iadd__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">other</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">other</span><span class="p">)</span>

    <span class="k">def</span> <span class="fm">__repr__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="p">(</span>
            <span class="s1">&#39;&lt;FunctionEventAvg key=</span><span class="si">{}</span><span class="s1"> self_cpu_time=</span><span class="si">{}</span><span class="s1"> cpu_time=</span><span class="si">{}</span><span class="s1"> &#39;</span>
            <span class="s1">&#39;cuda_time=</span><span class="si">{}</span><span class="s1"> input_shapes=</span><span class="si">{}</span><span class="s1">&gt;&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">key</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">self_cpu_time_total_str</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">cpu_time_str</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">cuda_time_str</span><span class="p">,</span>
                <span class="nb">str</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">input_shapes</span><span class="p">),</span>
            <span class="p">)</span>
        <span class="p">)</span>


<span class="c1">################################################################################</span>
<span class="c1"># Utilities</span>

<span class="k">class</span> <span class="nc">StringTable</span><span class="p">(</span><span class="n">defaultdict</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__missing__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">):</span>
        <span class="bp">self</span><span class="p">[</span><span class="n">key</span><span class="p">]</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_C</span><span class="o">.</span><span class="n">_demangle</span><span class="p">(</span><span class="n">key</span><span class="p">)</span>
        <span class="k">return</span> <span class="bp">self</span><span class="p">[</span><span class="n">key</span><span class="p">]</span>


<span class="c1">################################################################################</span>
<span class="c1"># CPU checkpoints</span>

<span class="k">def</span> <span class="nf">parse_cpu_trace</span><span class="p">(</span><span class="n">thread_records</span><span class="p">):</span>
    <span class="n">next_id</span> <span class="o">=</span> <span class="mi">0</span>
    <span class="n">start_record</span> <span class="o">=</span> <span class="kc">None</span>
    <span class="n">cuda_records</span> <span class="o">=</span> <span class="p">{}</span>
    <span class="n">functions</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="n">record_stack</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="n">string_table</span> <span class="o">=</span> <span class="n">StringTable</span><span class="p">()</span>

    <span class="c1"># cuda start events and the overall profiler start event don&#39;t happen</span>
    <span class="c1"># at exactly the same time because we need to record an event on each device</span>
    <span class="c1"># and each record takes ~4us. So we adjust here by adding the</span>
    <span class="c1"># difference in CPU time between the profiler start event</span>
    <span class="c1"># and the CPU time of the cuda start event for the device</span>
    <span class="k">def</span> <span class="nf">adjusted_time</span><span class="p">(</span><span class="n">cuda_record</span><span class="p">):</span>
        <span class="k">assert</span> <span class="n">cuda_record</span><span class="o">.</span><span class="n">device</span><span class="p">()</span> <span class="o">!=</span> <span class="o">-</span><span class="mi">1</span>
        <span class="n">cuda_time_0</span> <span class="o">=</span> <span class="n">cuda_records</span><span class="p">[</span><span class="n">cuda_record</span><span class="o">.</span><span class="n">device</span><span class="p">()]</span>
        <span class="k">return</span> <span class="n">cuda_time_0</span><span class="o">.</span><span class="n">cuda_elapsed_us</span><span class="p">(</span><span class="n">cuda_record</span><span class="p">)</span> <span class="o">+</span> <span class="n">start_record</span><span class="o">.</span><span class="n">cpu_elapsed_us</span><span class="p">(</span><span class="n">cuda_time_0</span><span class="p">)</span>

    <span class="c1"># &#39;__start_profile&#39; is not guaranteed to be first, so we must find it here</span>
    <span class="k">for</span> <span class="n">record</span> <span class="ow">in</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">thread_records</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">record</span><span class="o">.</span><span class="n">name</span><span class="p">()</span> <span class="o">==</span> <span class="s1">&#39;__start_profile&#39;</span><span class="p">:</span>
            <span class="n">start_record</span> <span class="o">=</span> <span class="n">record</span>
        <span class="k">elif</span> <span class="n">record</span><span class="o">.</span><span class="n">name</span><span class="p">()</span> <span class="o">==</span> <span class="s1">&#39;__cuda_start_event&#39;</span><span class="p">:</span>
            <span class="k">assert</span> <span class="n">record</span><span class="o">.</span><span class="n">device</span><span class="p">()</span> <span class="o">!=</span> <span class="o">-</span><span class="mi">1</span>
            <span class="n">cuda_records</span><span class="p">[</span><span class="n">record</span><span class="o">.</span><span class="n">device</span><span class="p">()]</span> <span class="o">=</span> <span class="n">record</span>
    <span class="k">assert</span> <span class="n">start_record</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>

    <span class="k">for</span> <span class="n">record</span> <span class="ow">in</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">thread_records</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">record</span><span class="o">.</span><span class="n">kind</span><span class="p">()</span> <span class="o">==</span> <span class="s1">&#39;mark&#39;</span><span class="p">:</span>
            <span class="k">continue</span>
        <span class="k">elif</span> <span class="n">record</span><span class="o">.</span><span class="n">kind</span><span class="p">()</span> <span class="o">==</span> <span class="s1">&#39;push&#39;</span><span class="p">:</span>
            <span class="n">record_stack</span><span class="o">.</span><span class="n">append</span><span class="p">((</span><span class="n">next_id</span><span class="p">,</span> <span class="n">record</span><span class="p">))</span>
            <span class="n">next_id</span> <span class="o">+=</span> <span class="mi">1</span>
        <span class="k">elif</span> <span class="n">record</span><span class="o">.</span><span class="n">kind</span><span class="p">()</span> <span class="o">==</span> <span class="s1">&#39;pop&#39;</span><span class="p">:</span>
            <span class="n">function_id</span><span class="p">,</span> <span class="n">start</span> <span class="o">=</span> <span class="n">record_stack</span><span class="o">.</span><span class="n">pop</span><span class="p">()</span>
            <span class="n">fe</span> <span class="o">=</span> <span class="n">FunctionEvent</span><span class="p">(</span>
                <span class="nb">id</span><span class="o">=</span><span class="n">function_id</span><span class="p">,</span>
                <span class="n">name</span><span class="o">=</span><span class="n">string_table</span><span class="p">[</span><span class="n">start</span><span class="o">.</span><span class="n">name</span><span class="p">()],</span>
                <span class="n">thread</span><span class="o">=</span><span class="n">start</span><span class="o">.</span><span class="n">thread_id</span><span class="p">(),</span>
                <span class="n">cpu_start</span><span class="o">=</span><span class="n">start_record</span><span class="o">.</span><span class="n">cpu_elapsed_us</span><span class="p">(</span><span class="n">start</span><span class="p">),</span>
                <span class="n">cpu_end</span><span class="o">=</span><span class="n">start_record</span><span class="o">.</span><span class="n">cpu_elapsed_us</span><span class="p">(</span><span class="n">record</span><span class="p">),</span>
                <span class="n">input_shapes</span><span class="o">=</span><span class="n">start</span><span class="o">.</span><span class="n">shapes</span><span class="p">())</span>
            <span class="k">if</span> <span class="n">start</span><span class="o">.</span><span class="n">has_cuda</span><span class="p">():</span>
                <span class="n">cuda_start</span> <span class="o">=</span> <span class="n">adjusted_time</span><span class="p">(</span><span class="n">start</span><span class="p">)</span>
                <span class="n">cuda_end</span> <span class="o">=</span> <span class="n">adjusted_time</span><span class="p">(</span><span class="n">record</span><span class="p">)</span>
                <span class="n">fe</span><span class="o">.</span><span class="n">append_kernel</span><span class="p">(</span><span class="n">start</span><span class="o">.</span><span class="n">name</span><span class="p">(),</span>
                                 <span class="n">start</span><span class="o">.</span><span class="n">device</span><span class="p">(),</span>
                                 <span class="n">cuda_start</span><span class="p">,</span>
                                 <span class="n">cuda_end</span><span class="p">)</span>
            <span class="n">functions</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">fe</span><span class="p">)</span>

    <span class="n">functions</span><span class="o">.</span><span class="n">sort</span><span class="p">(</span><span class="n">key</span><span class="o">=</span><span class="k">lambda</span> <span class="n">evt</span><span class="p">:</span> <span class="n">evt</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">functions</span>


<span class="c1">################################################################################</span>
<span class="c1"># CUDA checkpoints</span>

<span class="k">class</span> <span class="nc">EnforceUnique</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Raises an error if a key is seen more than once.&quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">seen</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">see</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">key</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">key</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">seen</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s1">&#39;duplicate key: &#39;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">key</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">seen</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">key</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">parse_nvprof_trace</span><span class="p">(</span><span class="n">path</span><span class="p">):</span>
    <span class="kn">import</span> <span class="nn">sqlite3</span>
    <span class="n">conn</span> <span class="o">=</span> <span class="n">sqlite3</span><span class="o">.</span><span class="n">connect</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
    <span class="n">conn</span><span class="o">.</span><span class="n">row_factory</span> <span class="o">=</span> <span class="n">sqlite3</span><span class="o">.</span><span class="n">Row</span>

    <span class="c1"># Parse strings table</span>
    <span class="n">strings</span> <span class="o">=</span> <span class="p">{}</span>
    <span class="k">for</span> <span class="n">r</span> <span class="ow">in</span> <span class="n">conn</span><span class="o">.</span><span class="n">execute</span><span class="p">(</span><span class="s2">&quot;SELECT _id_ as id, value FROM StringTable&quot;</span><span class="p">):</span>
        <span class="n">strings</span><span class="p">[</span><span class="n">r</span><span class="p">[</span><span class="s2">&quot;id&quot;</span><span class="p">]]</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_C</span><span class="o">.</span><span class="n">_demangle</span><span class="p">(</span><span class="n">r</span><span class="p">[</span><span class="s2">&quot;value&quot;</span><span class="p">])</span>

    <span class="c1"># First, find all functions and create FunctionEvents for them</span>
    <span class="n">marker_query</span> <span class="o">=</span> <span class="s2">&quot;&quot;&quot;</span>
<span class="s2">    SELECT</span>
<span class="s2">        start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time</span>
<span class="s2">    FROM</span>
<span class="s2">        CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end</span>
<span class="s2">        ON start.id = end.id</span>
<span class="s2">    WHERE</span>
<span class="s2">        start.name != 0 AND end.name = 0</span>
<span class="s2">    &quot;&quot;&quot;</span>
    <span class="n">functions</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="n">functions_map</span> <span class="o">=</span> <span class="p">{}</span>
    <span class="n">unique</span> <span class="o">=</span> <span class="n">EnforceUnique</span><span class="p">()</span>
    <span class="k">for</span> <span class="n">row</span> <span class="ow">in</span> <span class="n">conn</span><span class="o">.</span><span class="n">execute</span><span class="p">(</span><span class="n">marker_query</span><span class="p">):</span>
        <span class="n">unique</span><span class="o">.</span><span class="n">see</span><span class="p">(</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;marker_id&#39;</span><span class="p">])</span>
        <span class="n">evt</span> <span class="o">=</span> <span class="n">FunctionEvent</span><span class="p">(</span><span class="nb">id</span><span class="o">=</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;marker_id&#39;</span><span class="p">],</span>
                            <span class="n">name</span><span class="o">=</span><span class="n">strings</span><span class="p">[</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;name&#39;</span><span class="p">]],</span>
                            <span class="n">cpu_start</span><span class="o">=</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;start_time&#39;</span><span class="p">],</span>
                            <span class="n">cpu_end</span><span class="o">=</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;end_time&#39;</span><span class="p">],</span>
                            <span class="n">thread</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>  <span class="c1"># TODO: find in sqlite database</span>
        <span class="n">functions</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">evt</span><span class="p">)</span>
        <span class="n">functions_map</span><span class="p">[</span><span class="n">evt</span><span class="o">.</span><span class="n">id</span><span class="p">]</span> <span class="o">=</span> <span class="n">evt</span>

    <span class="c1"># Now, correlate all kernels with FunctionEvents</span>
    <span class="n">kernel_query</span> <span class="o">=</span> <span class="s2">&quot;&quot;&quot;</span>
<span class="s2">    SELECT</span>
<span class="s2">        start.id AS marker_id, start.name, start.timestamp, end.timestamp,</span>
<span class="s2">        runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,</span>
<span class="s2">        kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name</span>
<span class="s2">    FROM</span>
<span class="s2">        CUPTI_ACTIVITY_KIND_MARKER AS start</span>
<span class="s2">        INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end</span>
<span class="s2">            ON start.id = end.id</span>
<span class="s2">        INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime</span>
<span class="s2">            ON (start.timestamp &lt; runtime.start AND runtime.end &lt; end.timestamp)</span>
<span class="s2">        INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel</span>
<span class="s2">            ON kernel.correlationId = runtime.correlationId</span>
<span class="s2">    &quot;&quot;&quot;</span>
    <span class="n">unique</span> <span class="o">=</span> <span class="n">EnforceUnique</span><span class="p">()</span>
    <span class="k">for</span> <span class="n">row</span> <span class="ow">in</span> <span class="n">conn</span><span class="o">.</span><span class="n">execute</span><span class="p">(</span><span class="n">kernel_query</span><span class="p">):</span>
        <span class="n">unique</span><span class="o">.</span><span class="n">see</span><span class="p">(</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;marker_id&#39;</span><span class="p">],</span> <span class="n">row</span><span class="p">[</span><span class="s1">&#39;runtime_id&#39;</span><span class="p">])</span>
        <span class="k">assert</span> <span class="n">row</span><span class="p">[</span><span class="s1">&#39;cbid&#39;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">13</span>  <span class="c1"># 13 == Launch</span>
        <span class="n">evt</span> <span class="o">=</span> <span class="n">functions_map</span><span class="p">[</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;marker_id&#39;</span><span class="p">]]</span>
        <span class="n">evt</span><span class="o">.</span><span class="n">append_kernel</span><span class="p">(</span><span class="n">row</span><span class="p">[</span><span class="s1">&#39;kernel_name&#39;</span><span class="p">],</span>
                          <span class="mi">0</span><span class="p">,</span>
                          <span class="n">row</span><span class="p">[</span><span class="s1">&#39;kernel_start&#39;</span><span class="p">],</span>
                          <span class="n">row</span><span class="p">[</span><span class="s1">&#39;kernel_end&#39;</span><span class="p">])</span>

    <span class="n">functions</span><span class="o">.</span><span class="n">sort</span><span class="p">(</span><span class="n">key</span><span class="o">=</span><span class="k">lambda</span> <span class="n">evt</span><span class="p">:</span> <span class="n">evt</span><span class="o">.</span><span class="n">cpu_interval</span><span class="o">.</span><span class="n">start</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">functions</span>


<span class="c1">################################################################################</span>
<span class="c1"># Pretty printer</span>


<span class="k">def</span> <span class="nf">build_table</span><span class="p">(</span><span class="n">events</span><span class="p">,</span> <span class="n">sort_by</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">row_limit</span><span class="o">=</span><span class="mi">100</span><span class="p">,</span> <span class="n">use_cuda</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Prints a summary of events (which can be a list of FunctionEvent or FunctionEventAvg).&quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">events</span><span class="p">)</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
        <span class="k">return</span> <span class="s2">&quot;&quot;</span>

    <span class="k">if</span> <span class="n">sort_by</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">events</span> <span class="o">=</span> <span class="n">EventList</span><span class="p">(</span><span class="nb">sorted</span><span class="p">(</span>
            <span class="n">events</span><span class="p">,</span> <span class="n">key</span><span class="o">=</span><span class="k">lambda</span> <span class="n">evt</span><span class="p">:</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">evt</span><span class="p">,</span> <span class="n">sort_by</span><span class="p">),</span> <span class="n">reverse</span><span class="o">=</span><span class="kc">True</span>
        <span class="p">),</span> <span class="n">use_cuda</span><span class="o">=</span><span class="n">use_cuda</span><span class="p">)</span>

    <span class="n">has_input_shapes</span> <span class="o">=</span> <span class="nb">any</span><span class="p">(</span>
        <span class="p">[</span><span class="n">event</span><span class="o">.</span><span class="n">input_shapes</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">for</span> <span class="n">event</span> <span class="ow">in</span> <span class="n">events</span><span class="p">])</span>
    <span class="n">name_column_width</span> <span class="o">=</span> <span class="nb">max</span><span class="p">([</span><span class="nb">len</span><span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">key</span><span class="p">)</span> <span class="k">for</span> <span class="n">evt</span> <span class="ow">in</span> <span class="n">events</span><span class="p">])</span> <span class="o">+</span> <span class="mi">4</span>
    <span class="n">DEFAULT_COLUMN_WIDTH</span> <span class="o">=</span> <span class="mi">15</span>
    <span class="n">SHAPES_COLUMN_WIDTH</span> <span class="o">=</span> <span class="mi">35</span>

    <span class="n">headers</span> <span class="o">=</span> <span class="p">[</span>
        <span class="s1">&#39;Name&#39;</span><span class="p">,</span>
        <span class="s1">&#39;Self CPU total %&#39;</span><span class="p">,</span>
        <span class="s1">&#39;Self CPU total&#39;</span><span class="p">,</span>
        <span class="s1">&#39;CPU total %&#39;</span><span class="p">,</span>
        <span class="s1">&#39;CPU total&#39;</span><span class="p">,</span>
        <span class="s1">&#39;CPU time avg&#39;</span><span class="p">,</span>
    <span class="p">]</span>
    <span class="k">if</span> <span class="n">use_cuda</span><span class="p">:</span>
        <span class="n">headers</span><span class="o">.</span><span class="n">extend</span><span class="p">([</span>
            <span class="s1">&#39;CUDA total %&#39;</span><span class="p">,</span>
            <span class="s1">&#39;CUDA total&#39;</span><span class="p">,</span>
            <span class="s1">&#39;CUDA time avg&#39;</span><span class="p">,</span>
        <span class="p">])</span>
    <span class="n">headers</span><span class="o">.</span><span class="n">append</span><span class="p">(</span>
        <span class="s1">&#39;Number of Calls&#39;</span>
    <span class="p">)</span>

    <span class="c1"># Have to use a list because nonlocal is Py3 only...</span>
    <span class="n">SPACING_SIZE</span> <span class="o">=</span> <span class="mi">2</span>
    <span class="n">row_format</span> <span class="o">=</span> <span class="p">[</span><span class="s2">&quot;&quot;</span><span class="p">]</span>
    <span class="n">header_sep</span> <span class="o">=</span> <span class="p">[</span><span class="s2">&quot;&quot;</span><span class="p">]</span>
    <span class="n">line_length</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="n">SPACING_SIZE</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">add_column</span><span class="p">(</span><span class="n">padding</span><span class="p">):</span>
        <span class="n">row_format</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">+=</span> <span class="s1">&#39;{: &lt;&#39;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">padding</span><span class="p">)</span> <span class="o">+</span> <span class="s1">&#39;}  &#39;</span>
        <span class="n">header_sep</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">+=</span> <span class="s1">&#39;-&#39;</span> <span class="o">*</span> <span class="n">padding</span> <span class="o">+</span> <span class="s1">&#39;  &#39;</span>
        <span class="n">line_length</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">+=</span> <span class="n">padding</span> <span class="o">+</span> <span class="n">SPACING_SIZE</span>

    <span class="n">add_column</span><span class="p">(</span><span class="n">name_column_width</span><span class="p">)</span>
    <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="n">headers</span><span class="p">[</span><span class="mi">1</span><span class="p">:]:</span>
        <span class="n">add_column</span><span class="p">(</span><span class="n">DEFAULT_COLUMN_WIDTH</span><span class="p">)</span>

    <span class="k">if</span> <span class="n">has_input_shapes</span><span class="p">:</span>
        <span class="n">headers</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="s1">&#39;Input Shapes&#39;</span><span class="p">)</span>
        <span class="n">add_column</span><span class="p">(</span><span class="n">SHAPES_COLUMN_WIDTH</span><span class="p">)</span>

    <span class="n">row_format</span> <span class="o">=</span> <span class="n">row_format</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="n">header_sep</span> <span class="o">=</span> <span class="n">header_sep</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="n">line_length</span> <span class="o">=</span> <span class="n">line_length</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="n">add_column</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="c1"># Have to use a list because nonlocal is Py3 only...</span>
    <span class="n">result</span> <span class="o">=</span> <span class="p">[]</span>

    <span class="k">def</span> <span class="nf">append</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
        <span class="n">result</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">s</span><span class="p">)</span>
        <span class="n">result</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="s1">&#39;</span><span class="se">\n</span><span class="s1">&#39;</span><span class="p">)</span>  <span class="c1"># Yes, newline after the end as well</span>

    <span class="n">self_cpu_time_total</span> <span class="o">=</span> <span class="nb">sum</span><span class="p">([</span><span class="n">event</span><span class="o">.</span><span class="n">self_cpu_time_total</span> <span class="k">for</span> <span class="n">event</span> <span class="ow">in</span> <span class="n">events</span><span class="p">])</span>
    <span class="n">cuda_time_total</span> <span class="o">=</span> <span class="nb">sum</span><span class="p">([</span><span class="n">evt</span><span class="o">.</span><span class="n">cuda_time_total</span> <span class="k">for</span> <span class="n">evt</span> <span class="ow">in</span> <span class="n">events</span><span class="p">])</span>
    <span class="c1"># Actual printing</span>
    <span class="k">if</span> <span class="n">header</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">append</span><span class="p">(</span><span class="s1">&#39;=&#39;</span> <span class="o">*</span> <span class="n">line_length</span><span class="p">)</span>
        <span class="n">append</span><span class="p">(</span><span class="n">header</span><span class="p">)</span>
    <span class="n">append</span><span class="p">(</span><span class="n">header_sep</span><span class="p">)</span>
    <span class="n">append</span><span class="p">(</span><span class="n">row_format</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="o">*</span><span class="n">headers</span><span class="p">))</span>

    <span class="n">append</span><span class="p">(</span><span class="n">header_sep</span><span class="p">)</span>
    <span class="k">for</span> <span class="n">evt</span> <span class="ow">in</span> <span class="n">events</span><span class="p">[:</span><span class="n">row_limit</span><span class="p">]:</span>
        <span class="n">row_values</span> <span class="o">=</span> <span class="p">[</span>
            <span class="n">evt</span><span class="o">.</span><span class="n">key</span><span class="p">,</span>  <span class="c1"># Name</span>
            <span class="c1"># Self CPU total %</span>
            <span class="n">format_time_share</span><span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">self_cpu_time_total</span><span class="p">,</span>
                              <span class="n">self_cpu_time_total</span><span class="p">),</span>
            <span class="n">evt</span><span class="o">.</span><span class="n">self_cpu_time_total_str</span><span class="p">,</span>  <span class="c1"># Self CPU total</span>
            <span class="c1"># CPU total %</span>
            <span class="n">format_time_share</span><span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">cpu_time_total</span><span class="p">,</span> <span class="n">self_cpu_time_total</span><span class="p">),</span>
            <span class="n">evt</span><span class="o">.</span><span class="n">cpu_time_total_str</span><span class="p">,</span>  <span class="c1"># CPU total</span>
            <span class="n">evt</span><span class="o">.</span><span class="n">cpu_time_str</span><span class="p">,</span>  <span class="c1"># CPU time avg</span>
        <span class="p">]</span>
        <span class="k">if</span> <span class="n">use_cuda</span><span class="p">:</span>
            <span class="n">row_values</span><span class="o">.</span><span class="n">extend</span><span class="p">([</span>
                <span class="c1"># CUDA time total %</span>
                <span class="n">format_time_share</span><span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">cuda_time_total</span><span class="p">,</span> <span class="n">cuda_time_total</span><span class="p">),</span>
                <span class="n">evt</span><span class="o">.</span><span class="n">cuda_time_total_str</span><span class="p">,</span>
                <span class="n">evt</span><span class="o">.</span><span class="n">cuda_time_str</span><span class="p">,</span>  <span class="c1"># Cuda time avg</span>
            <span class="p">])</span>
        <span class="n">row_values</span><span class="o">.</span><span class="n">append</span><span class="p">(</span>
            <span class="n">evt</span><span class="o">.</span><span class="n">count</span><span class="p">,</span>  <span class="c1"># Number of calls</span>
        <span class="p">)</span>
        <span class="k">if</span> <span class="n">has_input_shapes</span><span class="p">:</span>
            <span class="n">row_values</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">str</span><span class="p">(</span><span class="n">evt</span><span class="o">.</span><span class="n">input_shapes</span><span class="p">)[:</span><span class="n">SHAPES_COLUMN_WIDTH</span><span class="p">])</span>
        <span class="n">append</span><span class="p">(</span><span class="n">row_format</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="o">*</span><span class="n">row_values</span><span class="p">))</span>

    <span class="n">append</span><span class="p">(</span><span class="n">header_sep</span><span class="p">)</span>
    <span class="n">append</span><span class="p">(</span><span class="s2">&quot;Self CPU time total: </span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">format_time</span><span class="p">(</span><span class="n">self_cpu_time_total</span><span class="p">)))</span>
    <span class="k">if</span> <span class="n">use_cuda</span><span class="p">:</span>
        <span class="n">append</span><span class="p">(</span><span class="s2">&quot;CUDA time total: </span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">format_time</span><span class="p">(</span><span class="n">cuda_time_total</span><span class="p">)))</span>
    <span class="k">return</span> <span class="s1">&#39;&#39;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">result</span><span class="p">)</span>
</pre></div>

             </article>
             
            </div>
            <footer>
  

  

    <hr>

  

  <div role="contentinfo">
    <p>
        &copy; Copyright 2019, Torch Contributors.

    </p>
  </div>
    
      <div>
        Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>
     

</footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              
            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <!-- Sphinx runtime scripts. documentation_options carries the
            data-url_root setting read by the scripts that follow it, so
            keep it first. type="text/javascript" is the HTML5 default
            and has been dropped. -->
       <script id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script>
         <script src="../../../_static/jquery.js"></script>
         <script src="../../../_static/underscore.js"></script>
         <script src="../../../_static/doctools.js"></script>
         <script src="../../../_static/language_data.js"></script>
     

  

  <!-- Vendor scripts; popper is loaded before bootstrap, theme.js last.
       Redundant type="text/javascript" attributes removed (HTML5 default). -->
  <script src="../../../_static/js/vendor/popper.min.js"></script>
  <script src="../../../_static/js/vendor/bootstrap.min.js"></script>
  <!-- NOTE(review): third-party CDN script served without an integrity (SRI)
       hash; consider pinning with integrity= + crossorigin= attributes. -->
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script src="../../../_static/js/theme.js"></script>

  <script>
    // Enable the Read the Docs theme navigation once the DOM is ready.
    jQuery(document).ready(function () {
      SphinxRtdTheme.Navigation.enable(true);
    });
  </script>
 
<script>
  // Standard Google Analytics (analytics.js) bootstrap snippet: installs a
  // queuing stub named `ga`, then injects the analytics.js loader
  // asynchronously before the first <script> tag on the page.
  // Keep byte-for-byte as supplied by Google — it is order-sensitive.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Register the GA property and record the initial page view.
  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<script>
  // Google gtag.js configuration for a second GA property; the async
  // loader for this ID is injected by the <script> tag above.
  window.dataLayer = window.dataLayer || [];

  // NOTE: Google's snippet pushes the `arguments` object itself (not a
  // copied array) — keep this function exactly as supplied.
  function gtag(){dataLayer.push(arguments);}

  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <!-- Icon-only link (background-image logo): needs an accessible name. -->
        <a href="https://pytorch.org/" class="footer-logo" aria-label="PyTorch home"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <!-- rel="noopener noreferrer" detaches the opener window on _blank links. -->
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <div id="mc_embed_signup">
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&amp;id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <!-- NOTE(review): display:none hides this label from screen
                           readers as well; a visually-hidden class would be the
                           better fix, but none is known to exist in this theme. -->
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>    <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->

                    <!-- Mailchimp honeypot field; off-screen and out of tab order by design. -->
                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <!-- Icon-only submit (empty value): give it an accessible name. -->
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button" aria-label="Subscribe">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <div class="footer-social-icons">
            <!-- Icon-only links: accessible names + opener isolation on _blank. -->
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <!-- alt added: the img is required to carry an alt attribute, and this
         one acts as the banner's close control. -->
    <img class="close-button" src="../../../_static/images/pytorch-x.svg" alt="Close">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <!-- NOTE(review): href="#" acting as a button (JS binds via
               data-behavior); a <button> would be more semantic. At minimum
               the icon-only link now has an accessible name. -->
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu" aria-label="Close menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <!-- Redundant type="text/javascript" attributes removed (HTML5 default). -->
  <script src="../../../_static/js/vendor/anchor.min.js"></script>

  <script>
    // Bind all theme interactive behaviors once the DOM is ready.
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Remove any empty p tags that Sphinx adds
      $("[data-tags='null']").remove();

      // Add class to links that have code blocks, since we cannot create links in code blocks
      $("article.pytorch-article a span.pre").each(function(e) {
        $(this).closest("a").addClass("has-code");
      });
    });
  </script>
</body>
</html>