


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>torch._utils &mdash; PyTorch master documentation</title>

  <link rel="canonical" href="https://pytorch.org/docs/stable/_modules/torch/_utils.html">

  <link rel="stylesheet" href="../../_static/css/theme.css">
  <!-- <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" /> -->
  <!-- Load a single KaTeX version. The page previously pulled in both
       0.10.0-beta and 0.11.1 stylesheets, downloading two copies and letting
       whichever came later silently win. Keep only 0.11.1. -->
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css">
  <link rel="stylesheet" href="../../_static/css/jit.css">
  <link rel="stylesheet" href="../../_static/katex-math.css">
  <link rel="index" title="Index" href="../../genindex.html">
  <link rel="search" title="Search" href="../../search.html">

  <script src="../../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts. crossorigin is required on font preloads
       (even same-origin ones) or the preloaded response is never reused. -->
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

  <!-- Preload the KaTeX fonts at the SAME version as the stylesheet above.
       These previously pointed at katex@0.10.0 while the CSS loaded 0.11.1,
       so the browser preloaded one set of fonts and then fetched another —
       wasting both requests. -->
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<!-- Site-wide PyTorch header with the main navigation menu.
     NOTE(review): this block appears BEFORE the <body> tag (opened further
     down); browsers re-parent it into <body> so it renders, but the template
     should emit it inside <body> — confirm against the theme layout. -->
<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <div class="ecosystem-dropdown">
              <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                Ecosystem
              </a>
              <div class="ecosystem-dropdown-menu">
                <!-- was href="https://pytorch.org/hub"" — stray extra quote -->
                <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                  <span class="dropdown-title">Models (Beta)</span>
                  <p>Discover, publish, and reuse pre-trained models</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools &amp; Libraries</span>
                  <p>Explore the ecosystem of tools and libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <div class="resources-dropdown">
              <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                Resources
              </a>
              <div class="resources-dropdown-menu">
                <!-- was href="https://pytorch.org/resources"" — stray extra quote -->
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/features">
                  <span class="dropdown-title">About</span>
                  <p>Learn about PyTorch’s features and capabilities</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>

      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
    </div>

  </div>
</div>


<body class="pytorch-body">

   

    

    <!-- Toggle control for the left-menu table of contents; the anchor is a
         JS hook (bound via data-behavior), not a navigation link. -->
    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">
            

            
              
              
                <div class="version">
                  master (1.5.0)
                </div>
              
            

            


  


<!-- Left-menu docs search; submits to the Sphinx search page via GET. -->
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <!-- placeholder alone is not an accessible name (it disappears on input
         and is not reliably announced); aria-label supplies one without
         changing the visual design -->
    <input type="text" name="q" placeholder="Search Docs" aria-label="Search Docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

            
          </div>

          
<!-- Banner linking from these preview ("master") docs to the stable build.
     NOTE(review): inline style attribute — acceptable in a generated page,
     but the color belongs in the theme stylesheet if this template is ever
     revisited. -->
<div>
  <a style="color:#F05732" href="https://pytorch.org/docs/stable/_modules/torch/_utils.html">
    You are viewing unstable developer preview docs.
    Click here to view docs for latest stable release.
  </a>
</div>

            
            
              
            
            
              <!-- Sphinx-generated left-nav toctree: Notes, Language
                   Bindings, and Python API sections. Generated from the
                   docs config — edit the source toctrees, not this output. -->
              <p class="caption"><span class="caption-text">Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/cuda.html">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../notes/windows.html">Windows FAQ</a></li>
</ul>
<p class="caption"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../packages.html">Javadoc</a></li>
</ul>
<p class="caption"><span class="caption-text">Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../amp.html">torch.cuda.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../optim.html">torch.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../rpc/index.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../__config__.html">torch.__config__</a></li>
</ul>
<!-- External ecosystem-library links in the left nav. -->
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<!-- was http:// — every sibling link uses https; plain http forces a
     redirect and is flagged on secure pages -->
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<!-- Community / governance links in the left nav. -->
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

            
          

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <!-- Page-level bar: breadcrumb trail (Docs > Module code > torch >
           torch._utils) plus the "Shortcuts" label for the right-hand rail.
           Same DOM as before; the generated blank-line noise and ragged
           indentation have been normalized. -->
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          <div role="navigation" aria-label="breadcrumbs navigation">
            <ul class="pytorch-breadcrumbs">
              <li>
                <a href="../../index.html">
                  Docs
                </a> &gt;
              </li>
              <li><a href="../index.html">Module code</a> &gt;</li>
              <li><a href="../torch.html">torch</a> &gt;</li>
              <li>torch._utils</li>
              <li class="pytorch-breadcrumbs-aside">
              </li>
            </ul>
          </div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <h1>Source code for torch._utils</h1><div class="highlight"><pre>
<span></span><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">warnings</span>
<span class="kn">from</span> <span class="nn">collections</span> <span class="kn">import</span> <span class="n">defaultdict</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">traceback</span>


<span class="k">def</span> <span class="nf">_type</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">non_blocking</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Returns the type if `dtype` is not provided, else casts this object to</span>
<span class="sd">    the specified type.</span>

<span class="sd">    If this is already of the correct type, no copy is performed and the</span>
<span class="sd">    original object is returned.</span>

<span class="sd">    Args:</span>
<span class="sd">        dtype (type or string): The desired type</span>
<span class="sd">        non_blocking (bool): If ``True``, and the source is in pinned memory</span>
<span class="sd">            and destination is on the GPU or vice versa, the copy is performed</span>
<span class="sd">            asynchronously with respect to the host. Otherwise, the argument</span>
<span class="sd">            has no effect.</span>
<span class="sd">        **kwargs: For compatibility, may contain the key ``async`` in place of</span>
<span class="sd">            the ``non_blocking`` argument. The ``async`` arg is deprecated.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">non_blocking</span> <span class="o">=</span> <span class="n">_get_async_or_non_blocking</span><span class="p">(</span><span class="s1">&#39;type&#39;</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">,</span> <span class="n">kwargs</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="vm">__module__</span> <span class="o">+</span> <span class="s1">&#39;.&#39;</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="vm">__class__</span><span class="o">.</span><span class="vm">__name__</span>

    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">dtype</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
        <span class="n">dtype</span> <span class="o">=</span> <span class="n">_import_dotted_name</span><span class="p">(</span><span class="n">dtype</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">dtype</span> <span class="o">==</span> <span class="nb">type</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span>
    <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">dtype</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;Cannot cast sparse tensor to dense tensor&quot;</span><span class="p">)</span>
        <span class="n">new_module_name</span> <span class="o">=</span> <span class="n">dtype</span><span class="o">.</span><span class="vm">__module__</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s1">&#39;.sparse&#39;</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">)</span>
        <span class="n">new_values_type_name</span> <span class="o">=</span> <span class="n">new_module_name</span> <span class="o">+</span> <span class="s1">&#39;.&#39;</span> <span class="o">+</span> <span class="n">dtype</span><span class="o">.</span><span class="vm">__name__</span>
        <span class="n">new_values</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_values</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">type</span><span class="p">(</span><span class="n">new_values_type_name</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">)</span>
        <span class="n">new_indices_type_name</span> <span class="o">=</span> <span class="n">new_module_name</span> <span class="o">+</span> <span class="s1">&#39;.LongTensor&#39;</span>
        <span class="n">new_indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_indices</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">type</span><span class="p">(</span><span class="n">new_indices_type_name</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">dtype</span><span class="p">(</span><span class="n">new_indices</span><span class="p">,</span> <span class="n">new_values</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
    <span class="k">if</span> <span class="n">dtype</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;Cannot cast dense tensor to sparse tensor&quot;</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">dtype</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">size</span><span class="p">())</span><span class="o">.</span><span class="n">copy_</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">_cuda</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">non_blocking</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Returns a copy of this object in CUDA memory.</span>

<span class="sd">    If this object is already in CUDA memory and on the correct device, then</span>
<span class="sd">    no copy is performed and the original object is returned.</span>

<span class="sd">    Args:</span>
<span class="sd">        device (int): The destination GPU id. Defaults to the current device.</span>
<span class="sd">        non_blocking (bool): If ``True`` and the source is in pinned memory,</span>
<span class="sd">            the copy will be asynchronous with respect to the host. Otherwise,</span>
<span class="sd">            the argument has no effect.</span>
<span class="sd">        **kwargs: For compatibility, may contain the key ``async`` in place of</span>
<span class="sd">            the ``non_blocking`` argument.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">non_blocking</span> <span class="o">=</span> <span class="n">_get_async_or_non_blocking</span><span class="p">(</span><span class="s1">&#39;cuda&#39;</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">,</span> <span class="n">kwargs</span><span class="p">)</span>
    <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_cuda</span><span class="p">:</span>
        <span class="k">if</span> <span class="n">device</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">current_device</span><span class="p">()</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_device</span><span class="p">()</span> <span class="o">==</span> <span class="n">device</span><span class="p">:</span>
            <span class="k">return</span> <span class="bp">self</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">if</span> <span class="n">device</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">device</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
    <span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="n">device</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
            <span class="n">new_type</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">sparse</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="vm">__class__</span><span class="o">.</span><span class="vm">__name__</span><span class="p">)</span>
            <span class="n">indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_indices</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">cuda</span><span class="p">(</span><span class="n">device</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">)</span>
            <span class="n">values</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_values</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">cuda</span><span class="p">(</span><span class="n">device</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">)</span>
            <span class="k">return</span> <span class="n">new_type</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="n">values</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">new_type</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="vm">__class__</span><span class="o">.</span><span class="vm">__name__</span><span class="p">)</span>
            <span class="k">return</span> <span class="n">new_type</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">size</span><span class="p">())</span><span class="o">.</span><span class="n">copy_</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">_get_async_or_non_blocking</span><span class="p">(</span><span class="n">function_name</span><span class="p">,</span> <span class="n">non_blocking</span><span class="p">,</span> <span class="n">kwargs</span><span class="p">):</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">kwargs</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">non_blocking</span>
    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">kwargs</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">1</span> <span class="ow">or</span> <span class="s1">&#39;async&#39;</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">kwargs</span><span class="p">:</span>
        <span class="n">message</span> <span class="o">=</span> <span class="s2">&quot;</span><span class="si">{}</span><span class="s2">() got an unexpected keyword argument &#39;</span><span class="si">{}</span><span class="s2">&#39;&quot;</span>
        <span class="n">argument</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">kwargs</span><span class="o">.</span><span class="n">keys</span><span class="p">())</span><span class="o">.</span><span class="n">pop</span><span class="p">()</span>
        <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="n">message</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">function_name</span><span class="p">,</span> <span class="n">argument</span><span class="p">))</span>
    <span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;&#39;async&#39; is deprecated; use &#39;non_blocking&#39;&quot;</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;async&#39;</span><span class="p">]</span>


<span class="c1"># Note [Don&#39;t serialize hooks]</span>
<span class="c1"># ~~~~~~~~~~~~~~~~~~~~~~~~~~~~</span>
<span class="c1"># Since time immemorial, we have serialized the backward hooks associated with</span>
<span class="c1"># variables.  This kind of half-worked--Python can pickle global functions</span>
<span class="c1"># (but not closures!)--but there were problems.</span>
<span class="c1">#</span>
<span class="c1">#   - It&#39;s fragile.  If you serialize a backward hook into a saved</span>
<span class="c1">#     model, and then you rename the function associated with the hook,</span>
<span class="c1">#     now your saved model is broken and you can&#39;t load it anymore.</span>
<span class="c1">#</span>
<span class="c1">#   - It&#39;s not actually used.  The standard recommendation is to</span>
<span class="c1">#     serialize the *state_dict* of a model, not the model itself</span>
<span class="c1">#     (since this is more stable to code changes affecting the model</span>
<span class="c1">#     serialization), and the state dict saves &quot;data&quot; only, thus</span>
<span class="c1">#     stripping the the backward hooks.  In some cases, hooks are</span>
<span class="c1">#     essential to the well-functioning of a model (e.g., DDP),</span>
<span class="c1">#     but DDP already manages readding the hooks!</span>
<span class="c1">#</span>
<span class="c1">#   - We didn&#39;t serialize them in many cases.  Prior to #10220, we</span>
<span class="c1">#     were dropping backward hooks in ForkingPickler.  We &quot;fixed&quot; this</span>
<span class="c1">#     to be convenient with other serialization sites, but lack of</span>
<span class="c1">#     serializing backward hooks wasn&#39;t actually the root cause of</span>
<span class="c1">#     the bug.</span>
<span class="c1">#</span>
<span class="c1"># With these cases in mind, we have decided that a better strategy</span>
<span class="c1"># is to just NOT serialize hooks at all.</span>
<span class="c1">#</span>
<span class="c1"># Since this is a BC-breaking change, we should warn when we previously</span>
<span class="c1"># serialized a hook, but no longer do so. This will be done by adding a special</span>
<span class="c1"># sentinel property to hooks will be used to suppress this warning. If a hook</span>
<span class="c1"># has the property _torch_serialize_ignore, we will not emit a warning if we</span>
<span class="c1"># attempt to serialize a Tensor with this hook attached to it.</span>
<span class="c1">#</span>
<span class="c1"># By the way, when _backward_hooks is skipped, we must give an EMPTY</span>
<span class="c1"># OrderedDict(); if you pass a None you&#39;ll run afoul of #12219.</span>


<span class="k">def</span> <span class="nf">_rebuild_tensor</span><span class="p">(</span><span class="n">storage</span><span class="p">,</span> <span class="n">storage_offset</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">stride</span><span class="p">):</span>
    <span class="c1"># Rebuild a tensor that views `storage` at the given offset with the</span>
    <span class="c1"># given size and stride.</span>
    <span class="c1"># first construct a tensor with the correct dtype/device</span>
    <span class="n">t</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">storage</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">storage</span><span class="o">.</span><span class="n">device</span><span class="p">)</span>
    <span class="c1"># set_ points t at the storage and geometry in place, without copying data</span>
    <span class="k">return</span> <span class="n">t</span><span class="o">.</span><span class="n">set_</span><span class="p">(</span><span class="n">storage</span><span class="p">,</span> <span class="n">storage_offset</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">stride</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">_rebuild_tensor_v2</span><span class="p">(</span><span class="n">storage</span><span class="p">,</span> <span class="n">storage_offset</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">stride</span><span class="p">,</span> <span class="n">requires_grad</span><span class="p">,</span> <span class="n">backward_hooks</span><span class="p">):</span>
    <span class="c1"># Like _rebuild_tensor, but additionally restores requires_grad and the</span>
    <span class="c1"># (normally empty) backward hooks dict.</span>
    <span class="n">tensor</span> <span class="o">=</span> <span class="n">_rebuild_tensor</span><span class="p">(</span><span class="n">storage</span><span class="p">,</span> <span class="n">storage_offset</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">stride</span><span class="p">)</span>
    <span class="n">tensor</span><span class="o">.</span><span class="n">requires_grad</span> <span class="o">=</span> <span class="n">requires_grad</span>
    <span class="c1"># NB: This line exists only for backwards compatibility; the</span>
    <span class="c1"># general expectation is that backward_hooks is an empty</span>
    <span class="c1"># OrderedDict.  See Note [Don&#39;t serialize hooks]</span>
    <span class="n">tensor</span><span class="o">.</span><span class="n">_backward_hooks</span> <span class="o">=</span> <span class="n">backward_hooks</span>
    <span class="k">return</span> <span class="n">tensor</span>


<span class="k">def</span> <span class="nf">_rebuild_sparse_tensor</span><span class="p">(</span><span class="n">layout</span><span class="p">,</span> <span class="n">data</span><span class="p">):</span>
    <span class="c1"># Rebuild a sparse tensor from (layout, data). Only the sparse COO layout</span>
    <span class="c1"># is supported, with data = (indices, values, size).</span>
    <span class="k">if</span> <span class="n">layout</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo</span><span class="p">:</span>
        <span class="n">indices</span><span class="p">,</span> <span class="n">values</span><span class="p">,</span> <span class="n">size</span> <span class="o">=</span> <span class="n">data</span>
        <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="n">values</span><span class="p">,</span> <span class="n">size</span><span class="p">)</span>
    <span class="k">raise</span> <span class="ne">NotImplementedError</span><span class="p">(</span><span class="s2">&quot;rebuilding sparse tensor for layout </span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">layout</span><span class="p">))</span>


<span class="k">def</span> <span class="nf">_rebuild_xla_tensor</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="n">dtype</span><span class="p">,</span> <span class="n">device</span><span class="p">,</span> <span class="n">requires_grad</span><span class="p">):</span>
    <span class="c1"># Rebuild a tensor from a numpy array, converting it to the requested</span>
    <span class="c1"># dtype/device (presumably an XLA device, per the name) and restoring</span>
    <span class="c1"># requires_grad.</span>
    <span class="n">tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">(</span><span class="n">data</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
    <span class="n">tensor</span><span class="o">.</span><span class="n">requires_grad</span> <span class="o">=</span> <span class="n">requires_grad</span>
    <span class="k">return</span> <span class="n">tensor</span>


<span class="k">def</span> <span class="nf">_rebuild_qtensor</span><span class="p">(</span><span class="n">storage</span><span class="p">,</span> <span class="n">storage_offset</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">stride</span><span class="p">,</span> <span class="n">quantizer_params</span><span class="p">,</span> <span class="n">requires_grad</span><span class="p">,</span> <span class="n">backward_hooks</span><span class="p">):</span>
    <span class="c1"># Rebuild a quantized tensor. quantizer_params[0] selects the qscheme;</span>
    <span class="c1"># the remaining entries are scheme-specific quantization parameters.</span>
    <span class="n">qscheme</span> <span class="o">=</span> <span class="n">quantizer_params</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="k">if</span> <span class="n">qscheme</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">per_tensor_affine</span><span class="p">:</span>
        <span class="n">_</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span> <span class="o">=</span> <span class="n">quantizer_params</span>
        <span class="n">tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_empty_affine_quantized</span><span class="p">(</span><span class="n">size</span><span class="p">,</span> <span class="n">scale</span><span class="o">=</span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="o">=</span><span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">storage</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
    <span class="k">elif</span> <span class="n">qscheme</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">per_channel_affine</span><span class="p">:</span>
        <span class="n">_</span><span class="p">,</span> <span class="n">scales</span><span class="p">,</span> <span class="n">zero_points</span><span class="p">,</span> <span class="n">axis</span> <span class="o">=</span> <span class="n">quantizer_params</span>
        <span class="c1"># scales/zero_points may arrive as plain lists; convert them to tensors</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">scales</span><span class="p">)</span> <span class="ow">is</span> <span class="nb">list</span> <span class="ow">and</span> <span class="nb">type</span><span class="p">(</span><span class="n">zero_points</span><span class="p">)</span> <span class="ow">is</span> <span class="nb">list</span><span class="p">:</span>
            <span class="n">scales</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="n">scales</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">)</span>
            <span class="n">zero_points</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="n">zero_points</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">long</span><span class="p">)</span>
        <span class="n">tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_empty_per_channel_affine_quantized</span><span class="p">(</span>
            <span class="n">size</span><span class="p">,</span> <span class="n">scales</span><span class="o">=</span><span class="n">scales</span><span class="p">,</span> <span class="n">zero_points</span><span class="o">=</span><span class="n">zero_points</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">storage</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;Can&#39;t deserialize quantized tensor with qscheme </span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">qscheme</span><span class="p">))</span>
    <span class="c1"># view the deserialized storage with the requested geometry</span>
    <span class="n">tensor</span><span class="o">.</span><span class="n">set_</span><span class="p">(</span><span class="n">storage</span><span class="p">,</span> <span class="n">storage_offset</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">stride</span><span class="p">)</span>
    <span class="n">tensor</span><span class="o">.</span><span class="n">requires_grad</span> <span class="o">=</span> <span class="n">requires_grad</span>
    <span class="c1"># NB: This line exists only for backwards compatibility; the</span>
    <span class="c1"># general expectation is that backward_hooks is an empty</span>
    <span class="c1"># OrderedDict.  See Note [Don&#39;t serialize hooks]</span>
    <span class="n">tensor</span><span class="o">.</span><span class="n">_backward_hooks</span> <span class="o">=</span> <span class="n">backward_hooks</span>
    <span class="k">return</span> <span class="n">tensor</span>

<span class="k">def</span> <span class="nf">_rebuild_parameter</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="n">requires_grad</span><span class="p">,</span> <span class="n">backward_hooks</span><span class="p">):</span>
    <span class="c1"># Rebuild an nn.Parameter wrapping `data` with the given requires_grad.</span>
    <span class="n">param</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="n">requires_grad</span><span class="p">)</span>
    <span class="c1"># NB: This line exists only for backwards compatibility; the</span>
    <span class="c1"># general expectation is that backward_hooks is an empty</span>
    <span class="c1"># OrderedDict.  See Note [Don&#39;t serialize hooks]</span>
    <span class="n">param</span><span class="o">.</span><span class="n">_backward_hooks</span> <span class="o">=</span> <span class="n">backward_hooks</span>

    <span class="k">return</span> <span class="n">param</span>


<span class="k">def</span> <span class="nf">_import_dotted_name</span><span class="p">(</span><span class="n">name</span><span class="p">):</span>
    <span class="c1"># Resolve a dotted path (e.g. &quot;pkg.mod.attr&quot;) to the object it names.</span>
    <span class="c1"># __import__ returns only the top-level module, so the remaining path</span>
    <span class="c1"># components are walked via getattr.</span>
    <span class="n">components</span> <span class="o">=</span> <span class="n">name</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s1">&#39;.&#39;</span><span class="p">)</span>
    <span class="n">obj</span> <span class="o">=</span> <span class="nb">__import__</span><span class="p">(</span><span class="n">components</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span>
    <span class="k">for</span> <span class="n">component</span> <span class="ow">in</span> <span class="n">components</span><span class="p">[</span><span class="mi">1</span><span class="p">:]:</span>
        <span class="n">obj</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">obj</span><span class="p">,</span> <span class="n">component</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">obj</span>


<span class="c1"># Taken from python 3.5 docs</span>
<span class="k">def</span> <span class="nf">_accumulate</span><span class="p">(</span><span class="n">iterable</span><span class="p">,</span> <span class="n">fn</span><span class="o">=</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span><span class="p">):</span>
    <span class="s1">&#39;Return running totals&#39;</span>
    <span class="c1"># _accumulate([1,2,3,4,5]) --&gt; 1 3 6 10 15</span>
    <span class="c1"># _accumulate([1,2,3,4,5], operator.mul) --&gt; 1 2 6 24 120</span>
    <span class="n">it</span> <span class="o">=</span> <span class="nb">iter</span><span class="p">(</span><span class="n">iterable</span><span class="p">)</span>
    <span class="c1"># an empty iterable yields no values at all</span>
    <span class="k">try</span><span class="p">:</span>
        <span class="n">total</span> <span class="o">=</span> <span class="nb">next</span><span class="p">(</span><span class="n">it</span><span class="p">)</span>
    <span class="k">except</span> <span class="ne">StopIteration</span><span class="p">:</span>
        <span class="k">return</span>
    <span class="k">yield</span> <span class="n">total</span>
    <span class="k">for</span> <span class="n">element</span> <span class="ow">in</span> <span class="n">it</span><span class="p">:</span>
        <span class="n">total</span> <span class="o">=</span> <span class="n">fn</span><span class="p">(</span><span class="n">total</span><span class="p">,</span> <span class="n">element</span><span class="p">)</span>
        <span class="k">yield</span> <span class="n">total</span>


<span class="k">def</span> <span class="nf">_flatten_dense_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of</span>
<span class="sd">    same dense type.</span>

<span class="sd">    Since inputs are dense, the resulting tensor will be a concatenated 1D</span>
<span class="sd">    buffer. Element-wise operation on this buffer will be equivalent to</span>
<span class="sd">    operating individually.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        tensors (Iterable[Tensor]): dense tensors to flatten.</span>

<span class="sd">    Returns:</span>
<span class="sd">        A contiguous 1D buffer containing input tensors.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="c1"># fast path: a single tensor needs no concatenation</span>
    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">tensors</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">contiguous</span><span class="p">()</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>
    <span class="n">flat</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">([</span><span class="n">t</span><span class="o">.</span><span class="n">contiguous</span><span class="p">()</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">flat</span>


<span class="k">def</span> <span class="nf">_flatten_sparse_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Flatten sparse tensors into two contiguous 1D buffers, one of indices and</span>
<span class="sd">    one of values. Assume tensors are of same sparse type.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        tensors (Iterable[Tensor]): sparse tensors to flatten.</span>

<span class="sd">    Returns:</span>
<span class="sd">        A tuple of two contiguous 1D buffers, one containing input tensors&#39;</span>
<span class="sd">        indices and the other containing the values.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="c1"># indices and values are flattened independently; they are paired back</span>
    <span class="c1"># up by _unflatten_sparse_tensors</span>
    <span class="n">flat_indices</span> <span class="o">=</span> <span class="n">_flatten_dense_tensors</span><span class="p">([</span><span class="n">torch</span><span class="o">.</span><span class="n">_indices</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">])</span>
    <span class="n">flat_values</span> <span class="o">=</span> <span class="n">_flatten_dense_tensors</span><span class="p">([</span><span class="n">torch</span><span class="o">.</span><span class="n">_values</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">])</span>
    <span class="k">return</span> <span class="n">flat_indices</span><span class="p">,</span> <span class="n">flat_values</span>


<span class="k">def</span> <span class="nf">_unflatten_dense_tensors</span><span class="p">(</span><span class="n">flat</span><span class="p">,</span> <span class="n">tensors</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;View a flat buffer using the sizes of tensors. Assume that tensors are of</span>
<span class="sd">    same dense type, and that flat is given by _flatten_dense_tensors.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        flat (Tensor): flattened dense tensors to unflatten.</span>
<span class="sd">        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to</span>
<span class="sd">          unflatten flat.</span>

<span class="sd">    Returns:</span>
<span class="sd">        Unflattened dense tensors with sizes same as tensors and values from</span>
<span class="sd">        flat.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">outputs</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="n">offset</span> <span class="o">=</span> <span class="mi">0</span>
    <span class="c1"># narrow/view_as produce views, so each output aliases flat&#39;s storage</span>
    <span class="k">for</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">:</span>
        <span class="n">numel</span> <span class="o">=</span> <span class="n">tensor</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span>
        <span class="n">outputs</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">flat</span><span class="o">.</span><span class="n">narrow</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">offset</span><span class="p">,</span> <span class="n">numel</span><span class="p">)</span><span class="o">.</span><span class="n">view_as</span><span class="p">(</span><span class="n">tensor</span><span class="p">))</span>
        <span class="n">offset</span> <span class="o">+=</span> <span class="n">numel</span>
    <span class="k">return</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">outputs</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">_unflatten_sparse_tensors</span><span class="p">(</span><span class="n">flat</span><span class="p">,</span> <span class="n">tensors</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;View flat buffer (containing indices and values) using the sizes of</span>
<span class="sd">    tensors. Assume that tensors are of same sparse type, and that flat is given</span>
<span class="sd">    by _flatten_sparse_tensors.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse</span>
<span class="sd">          tensors to unflatten.</span>
<span class="sd">        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to</span>
<span class="sd">          unflatten flat.</span>

<span class="sd">    Returns:</span>
<span class="sd">        Unflattened sparse tensors with sizes same as tensors and values from</span>
<span class="sd">        flat.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">flat_indices</span><span class="p">,</span> <span class="n">flat_values</span> <span class="o">=</span> <span class="n">flat</span>
    <span class="n">indices</span> <span class="o">=</span> <span class="n">_unflatten_dense_tensors</span><span class="p">(</span><span class="n">flat_indices</span><span class="p">,</span> <span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">_indices</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">])</span>
    <span class="n">values</span> <span class="o">=</span> <span class="n">_unflatten_dense_tensors</span><span class="p">(</span><span class="n">flat_values</span><span class="p">,</span> <span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">_values</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">])</span>
    <span class="n">outputs</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="c1"># t.new(indices, values, size) constructs a sparse tensor of t&#39;s type</span>
    <span class="k">for</span> <span class="n">t</span><span class="p">,</span> <span class="n">i</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">tensors</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">values</span><span class="p">):</span>
        <span class="n">outputs</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">t</span><span class="o">.</span><span class="n">new</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="n">t</span><span class="o">.</span><span class="n">size</span><span class="p">()))</span>
    <span class="k">return</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">outputs</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">_reorder_tensors_as</span><span class="p">(</span><span class="n">tensors</span><span class="p">,</span> <span class="n">ordered_tensors</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Assume that tensors are of same order as ordered_tensors within their</span>
<span class="sd">    types, e.g., from _take_tensors. Reorder them to be of same order as</span>
<span class="sd">    ordered_tensors.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        tensors (Iterable[Tensor]): tensors to be reordered. They should be of</span>
<span class="sd">          the same order as ordered_tensors within their own types.</span>
<span class="sd">        ordered_tensors (Iterable[Tensor]): tensors whose order will be the</span>
<span class="sd">          reference.</span>

<span class="sd">    Returns:</span>
<span class="sd">        Ordered tuple of tensors with contents from tensors and order of</span>
<span class="sd">        ordered_tensors.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="c1"># bucket tensors by their type string, preserving per-type order</span>
    <span class="n">type_dict</span> <span class="o">=</span> <span class="n">defaultdict</span><span class="p">(</span><span class="nb">list</span><span class="p">)</span>
    <span class="k">for</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">:</span>
        <span class="n">type_dict</span><span class="p">[</span><span class="n">tensor</span><span class="o">.</span><span class="n">type</span><span class="p">()]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
    <span class="c1"># one iterator per type: following ordered_tensors, each lookup consumes</span>
    <span class="c1"># the next tensor of that type in its original relative order</span>
    <span class="n">type_dict</span> <span class="o">=</span> <span class="p">{</span><span class="n">t</span><span class="p">:</span> <span class="nb">iter</span><span class="p">(</span><span class="n">coll</span><span class="p">)</span> <span class="k">for</span> <span class="n">t</span><span class="p">,</span> <span class="n">coll</span> <span class="ow">in</span> <span class="n">type_dict</span><span class="o">.</span><span class="n">items</span><span class="p">()}</span>
    <span class="k">return</span> <span class="nb">tuple</span><span class="p">(</span><span class="nb">next</span><span class="p">(</span><span class="n">type_dict</span><span class="p">[</span><span class="n">tensor</span><span class="o">.</span><span class="n">type</span><span class="p">()])</span> <span class="k">for</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">ordered_tensors</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">_take_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">,</span> <span class="n">size_limit</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Group tensors into chunks. This generator yields a chunk at each time,</span>
<span class="sd">    each containing tensors of same type up to certain byte limit in total size.</span>

<span class="sd">    Args:</span>
<span class="sd">        tensors (Sequence): A sequence of tensors to be separated into chunks.</span>
<span class="sd">        size_limit (int): The limit of each chunk in bytes.</span>

<span class="sd">    Yields:</span>
<span class="sd">        Blocks of tensors of same type and within size_limit. The yielded</span>
<span class="sd">        tensors are only ordered as the original sequence within its types.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="c1"># buf_dict: tensor type string -&gt; [list of tensors, accumulated bytes]</span>
    <span class="n">buf_dict</span> <span class="o">=</span> <span class="n">defaultdict</span><span class="p">(</span><span class="k">lambda</span><span class="p">:</span> <span class="p">[[],</span> <span class="mi">0</span><span class="p">])</span>
    <span class="k">for</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">:</span>
        <span class="n">t</span> <span class="o">=</span> <span class="n">tensor</span><span class="o">.</span><span class="n">type</span><span class="p">()</span>
        <span class="c1"># sparse tensors count both indices and values toward the byte size</span>
        <span class="k">if</span> <span class="n">tensor</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
            <span class="n">indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_indices</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
            <span class="n">values</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_values</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
            <span class="n">size</span> <span class="o">=</span> <span class="n">indices</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">*</span> <span class="n">indices</span><span class="o">.</span><span class="n">element_size</span><span class="p">()</span> <span class="o">+</span> <span class="n">values</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">*</span> <span class="n">values</span><span class="o">.</span><span class="n">element_size</span><span class="p">()</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">size</span> <span class="o">=</span> <span class="n">tensor</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">*</span> <span class="n">tensor</span><span class="o">.</span><span class="n">element_size</span><span class="p">()</span>
        <span class="n">buf_and_size</span> <span class="o">=</span> <span class="n">buf_dict</span><span class="p">[</span><span class="n">t</span><span class="p">]</span>
        <span class="c1"># flush the current non-empty buffer first if this tensor would</span>
        <span class="c1"># push it over the limit</span>
        <span class="k">if</span> <span class="n">buf_and_size</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">size</span> <span class="o">&gt;</span> <span class="n">size_limit</span> <span class="ow">and</span> <span class="n">buf_and_size</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">yield</span> <span class="n">buf_and_size</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
            <span class="n">buf_and_size</span> <span class="o">=</span> <span class="n">buf_dict</span><span class="p">[</span><span class="n">t</span><span class="p">]</span> <span class="o">=</span> <span class="p">[[],</span> <span class="mi">0</span><span class="p">]</span>
        <span class="n">buf_and_size</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
        <span class="n">buf_and_size</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">+=</span> <span class="n">size</span>
    <span class="c1"># flush whatever is left in each per-type buffer</span>
    <span class="k">for</span> <span class="n">buf</span><span class="p">,</span> <span class="n">_</span> <span class="ow">in</span> <span class="n">buf_dict</span><span class="o">.</span><span class="n">values</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">buf</span><span class="p">)</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">yield</span> <span class="n">buf</span>


<span class="c1"># annotation decorator to get annotations in a way that is compatible</span>
<span class="c1"># with both Python 2 and 3</span>
<span class="k">def</span> <span class="nf">annotate</span><span class="p">(</span><span class="n">ret</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
    <span class="k">def</span> <span class="nf">dec</span><span class="p">(</span><span class="n">fun</span><span class="p">):</span>
        <span class="c1"># kwargs map parameter names to their annotations; `ret` is stored</span>
        <span class="c1"># under the special &#39;return&#39; key, matching native annotations</span>
        <span class="n">fun</span><span class="o">.</span><span class="vm">__annotations__</span> <span class="o">=</span> <span class="nb">dict</span><span class="p">(</span><span class="n">kwargs</span><span class="p">)</span>
        <span class="n">fun</span><span class="o">.</span><span class="vm">__annotations__</span><span class="p">[</span><span class="s1">&#39;return&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">ret</span>
        <span class="k">return</span> <span class="n">fun</span>
    <span class="k">return</span> <span class="n">dec</span>


<span class="c1"># NOTE [ Python Traceback Reference Cycle Problem ]</span>
<span class="c1">#</span>
<span class="c1"># When using sys.exc_info(), it is important to **not** store the exc_info[2],</span>
<span class="c1"># which is the traceback, because otherwise you will run into the traceback</span>
<span class="c1"># reference cycle problem, i.e., the traceback holding reference to the frame,</span>
<span class="c1"># and the frame (which holds reference to all the object in its temporary scope)</span>
<span class="c1"># holding reference the traceback.</span>

<span class="k">class</span> <span class="nc">KeyErrorMessage</span><span class="p">(</span><span class="nb">str</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;str subclass that returns itself in repr&quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__repr__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span>


<span class="k">class</span> <span class="nc">ExceptionWrapper</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Wraps an exception plus traceback to communicate across threads&quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">exc_info</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">where</span><span class="o">=</span><span class="s2">&quot;in background&quot;</span><span class="p">):</span>
        <span class="c1"># It is important that we don&#39;t store exc_info, see</span>
        <span class="c1"># NOTE [ Python Traceback Reference Cycle Problem ]</span>
        <span class="k">if</span> <span class="n">exc_info</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">exc_info</span> <span class="o">=</span> <span class="n">sys</span><span class="o">.</span><span class="n">exc_info</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">exc_type</span> <span class="o">=</span> <span class="n">exc_info</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">exc_msg</span> <span class="o">=</span> <span class="s2">&quot;&quot;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">traceback</span><span class="o">.</span><span class="n">format_exception</span><span class="p">(</span><span class="o">*</span><span class="n">exc_info</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">where</span> <span class="o">=</span> <span class="n">where</span>

    <span class="k">def</span> <span class="nf">reraise</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Reraises the wrapped exception in the current thread&quot;&quot;&quot;</span>
        <span class="c1"># Format a message such as: &quot;Caught ValueError in DataLoader worker</span>
        <span class="c1"># process 2. Original Traceback:&quot;, followed by the traceback.</span>
        <span class="n">msg</span> <span class="o">=</span> <span class="s2">&quot;Caught </span><span class="si">{}</span><span class="s2"> </span><span class="si">{}</span><span class="s2">.</span><span class="se">\n</span><span class="s2">Original </span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">exc_type</span><span class="o">.</span><span class="vm">__name__</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">where</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">exc_msg</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">exc_type</span> <span class="o">==</span> <span class="ne">KeyError</span><span class="p">:</span>
            <span class="c1"># KeyError calls repr() on its argument (usually a dict key). This</span>
            <span class="c1"># makes stack traces unreadable. It will not be changed in Python</span>
            <span class="c1"># (https://bugs.python.org/issue2651), so we work around it.</span>
            <span class="n">msg</span> <span class="o">=</span> <span class="n">KeyErrorMessage</span><span class="p">(</span><span class="n">msg</span><span class="p">)</span>
        <span class="k">raise</span> <span class="bp">self</span><span class="o">.</span><span class="n">exc_type</span><span class="p">(</span><span class="n">msg</span><span class="p">)</span>
</pre></div>

             </article>
             
            </div>
            <footer>

    <hr>

  <!-- Sphinx/Read-the-Docs attribution and copyright -->
  <div role="contentinfo">
    <p>
        &copy; Copyright 2019, Torch Contributors.
    </p>
  </div>

      <div>
        <!-- Use https: the plain-http sphinx-doc.org link was a mixed-content/insecure reference -->
        Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>

</footer>

          </div>
        </div>

        <!-- Right-hand sidebar container. Empty in the static markup;
             presumably populated by theme.js at runtime (in-page TOC) — confirm before removing. -->
        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              
            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <!-- Sphinx runtime scripts. `type="text/javascript"` is the default and is omitted. -->
       <script id="documentation_options" data-url_root="../../" src="../../_static/documentation_options.js"></script>
       <script src="../../_static/jquery.js"></script>
       <script src="../../_static/underscore.js"></script>
       <script src="../../_static/doctools.js"></script>
       <script src="../../_static/language_data.js"></script>

  <!-- Theme vendor scripts; original load order preserved -->
  <script src="../../_static/js/vendor/popper.min.js"></script>
  <script src="../../_static/js/vendor/bootstrap.min.js"></script>
  <!-- NOTE(review): third-party CDN script has no SRI `integrity` hash — consider pinning one -->
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script src="../../_static/js/theme.js"></script>

  <!-- Enable sticky navigation once the DOM is ready -->
  <script>
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 
<script>
  // Standard Google Analytics (analytics.js) loader snippet: installs a stub
  // `ga()` command queue, then injects analytics.js asynchronously.
  // Intentionally left byte-for-byte as shipped by Google — do not reformat.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Create the tracker for this property and record the page view.
  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<!-- Global site tag (gtag.js) for a second GA property -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<script>
  // Standard gtag bootstrap: commands queue into dataLayer until gtag.js loads.
  window.dataLayer = window.dataLayer || [];

  function gtag(){dataLayer.push(arguments);}

  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<!-- Google Ads conversion-tracking pixel (1x1, non-content: alt="" is correct) -->
<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <!-- Three-column promo band linking to Docs, Tutorials, and Resources.
       Structure/classes are coupled to the site stylesheet — keep markup stable. -->
  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <!-- Site footer: brand links, resource links, newsletter signup, social icons -->
  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <!-- Icon-only link (background-image logo): accessible name via aria-label -->
        <a href="https://pytorch.org/" class="footer-logo" aria-label="PyTorch"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <!-- target="_blank" links carry rel="noopener noreferrer" so the new tab
                 cannot reach window.opener and no referrer leaks to the external site -->
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <div id="mc_embed_signup">
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>    <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->

                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <!-- Icon-only social links: accessible names via aria-label; noopener for new-tab links -->
          <div class="footer-social-icons">
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <!-- GDPR cookie-consent banner -->
  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <!-- alt is required on img. NOTE(review): an <img> used as a dismiss control should
         ideally be a <button>; left as-is because site JS presumably binds on .close-button — confirm. -->
    <img class="close-button" src="../../_static/images/pytorch-x.svg" alt="Close">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <!-- Mobile navigation overlay, toggled by theme JS -->
  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <!-- Icon-only close control: needs an accessible name.
               NOTE(review): href="#" acting as a button is an anti-pattern; markup kept
               because theme JS binds via data-behavior — migrate to <button> with the JS. -->
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu" aria-label="Close menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <script type="text/javascript" src="../../_static/js/vendor/anchor.min.js"></script>

  <!-- Page bootstrap: wires up theme UI behaviors once the DOM is ready.
       All *.bind() objects (mobileMenu, sideMenus, …) are globals defined in
       theme.js, not in this file — verify there before renaming anything. -->
  <script type="text/javascript">
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Remove any empty p tags that Sphinx adds
      $("[data-tags='null']").remove();

      // Add class to links that have code blocks, since we cannot create links in code blocks
      $("article.pytorch-article a span.pre").each(function(e) {
        $(this).closest("a").addClass("has-code");
      });
    })
  </script>
</body>
</html>