


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>torch.functional &mdash; PyTorch master documentation</title>

  <link rel="canonical" href="https://pytorch.org/docs/stable/_modules/torch/functional.html">

  <link rel="stylesheet" href="../../_static/css/theme.css">
  <!-- <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" /> -->
  <link rel="stylesheet" href="../../_static/css/jit.css">
  <!-- NOTE(review): the page previously loaded BOTH katex@0.10.0-beta and
       katex@0.11.1 stylesheets; 0.11.1 loaded last and won the cascade, so
       the beta copy was a redundant download and has been dropped. -->
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css">
  <link rel="stylesheet" href="../../_static/katex-math.css">
  <link rel="index" title="Index" href="../../genindex.html">
  <link rel="search" title="Search" href="../../search.html">

  <!-- Modernizr intentionally loads without defer so the no-js class on
       <html> is swapped before first paint. -->
  <script src="../../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts. crossorigin is required on font preloads even
       for same-origin files; without it the preloaded copy is never reused. -->
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

  <!-- Preload the KaTeX fonts. The version MUST match katex.min.css above
       (these pointed at katex@0.10.0 against the 0.11.1 stylesheet, so every
       preload was wasted and each font was fetched twice). -->
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<body class="pytorch-body">

<!--
  Site header / primary navigation.
  NOTE(review): this markup previously appeared BEFORE the <body> tag; flow
  content is not allowed between </head> and <body>, so <body> now opens first.
-->
<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <div class="ecosystem-dropdown">
              <!-- href-less anchor: inert placeholder whose behavior is bound via data-toggle -->
              <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                Ecosystem
              </a>
              <div class="ecosystem-dropdown-menu">
                <!-- fixed: stray doubled quote after the href value -->
                <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                  <!-- fixed: class value was unquoted -->
                  <span class="dropdown-title">Models (Beta)</span>
                  <p>Discover, publish, and reuse pre-trained models</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools &amp; Libraries</span>
                  <p>Explore the ecosystem of tools and libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <div class="resources-dropdown">
              <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                Resources
              </a>
              <div class="resources-dropdown-menu">
                <!-- fixed: stray doubled quote after the href value -->
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/features">
                  <span class="dropdown-title">About</span>
                  <p>Learn about PyTorch’s features and capabilities</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>

      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
    </div>

  </div>
</div>

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">

            <!-- Version badge for the docs build being viewed -->
            <div class="version">
              master (1.5.0 )
            </div>

            <!-- Full-text docs search; plain GET form processed by search.html -->
            <div role="search">
              <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
                <input type="text" name="q" placeholder="Search Docs">
                <input type="hidden" name="check_keywords" value="yes">
                <input type="hidden" name="area" value="default">
              </form>
            </div>

          </div>

          <!-- Banner steering readers of preview builds toward the stable docs -->
          <div>
            <a style="color:#F05732" href="https://pytorch.org/docs/stable/_modules/torch/functional.html">
              You are viewing unstable developer preview docs.
              Click here to view docs for latest stable release.
            </a>
          </div>

          <!-- Sphinx toctree: Notes -->
          <p class="caption"><span class="caption-text">Notes</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/autograd.html">Autograd mechanics</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/broadcasting.html">Broadcasting semantics</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/cuda.html">CUDA semantics</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/ddp.html">Distributed Data Parallel</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/extending.html">Extending PyTorch</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/faq.html">Frequently Asked Questions</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/multiprocessing.html">Multiprocessing best practices</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/randomness.html">Reproducibility</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/serialization.html">Serialization semantics</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../notes/windows.html">Windows FAQ</a></li>
          </ul>

          <!-- Sphinx toctree: Language Bindings -->
          <p class="caption"><span class="caption-text">Language Bindings</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../packages.html">Javadoc</a></li>
          </ul>

          <!-- Sphinx toctree: Python API -->
          <p class="caption"><span class="caption-text">Python API</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../torch.html">torch</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../nn.html">torch.nn</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../nn.functional.html">torch.nn.functional</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../tensors.html">torch.Tensor</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../tensor_attributes.html">Tensor Attributes</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../tensor_view.html">Tensor Views</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../autograd.html">torch.autograd</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../cuda.html">torch.cuda</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../amp.html">torch.cuda.amp</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../distributed.html">torch.distributed</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../distributions.html">torch.distributions</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../hub.html">torch.hub</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../jit.html">torch.jit</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../nn.init.html">torch.nn.init</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../onnx.html">torch.onnx</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../optim.html">torch.optim</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../quantization.html">Quantization</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../rpc/index.html">Distributed RPC Framework</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../random.html">torch.random</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../sparse.html">torch.sparse</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../storage.html">torch.Storage</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../bottleneck.html">torch.utils.bottleneck</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../checkpoint.html">torch.utils.checkpoint</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../cpp_extension.html">torch.utils.cpp_extension</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../data.html">torch.utils.data</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../dlpack.html">torch.utils.dlpack</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../model_zoo.html">torch.utils.model_zoo</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../tensorboard.html">torch.utils.tensorboard</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../type_info.html">Type Info</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../named_tensor.html">Named Tensors</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../name_inference.html">Named Tensors operator coverage</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../__config__.html">torch.__config__</a></li>
          </ul>
<!-- Sphinx toctree: Libraries (external project sites) -->
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<!-- fixed: was plain http://, inconsistent with every sibling link and
     forcing an insecure hop/redirect -->
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<!-- Sphinx toctree: Community -->
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          <!-- Breadcrumb trail: Docs > Module code > torch > torch.functional -->
          <div role="navigation" aria-label="breadcrumbs navigation">
            <ul class="pytorch-breadcrumbs">
              <li>
                <a href="../../index.html">
                  Docs
                </a> &gt;
              </li>
              <li><a href="../index.html">Module code</a> &gt;</li>
              <li><a href="../torch.html">torch</a> &gt;</li>
              <li>torch.functional</li>
              <!-- Slot kept for page-action links (e.g. "view source"); empty here -->
              <li class="pytorch-breadcrumbs-aside">
              </li>
            </ul>
          </div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <h1>Source code for torch.functional</h1><div class="highlight"><pre>
<span></span><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.nn.functional</span> <span class="k">as</span> <span class="nn">F</span>
<span class="kn">from</span> <span class="nn">._lowrank</span> <span class="kn">import</span> <span class="n">svd_lowrank</span><span class="p">,</span> <span class="n">pca_lowrank</span>
<span class="kn">from</span> <span class="nn">._overrides</span> <span class="kn">import</span> <span class="n">has_torch_function</span><span class="p">,</span> <span class="n">handle_torch_function</span>
<span class="kn">from</span> <span class="nn">._jit_internal</span> <span class="kn">import</span> <span class="n">boolean_dispatch</span><span class="p">,</span> <span class="n">List</span>
<span class="kn">from</span> <span class="nn">._jit_internal</span> <span class="kn">import</span> <span class="n">_overload</span> <span class="k">as</span> <span class="n">overload</span>
<span class="kn">from</span> <span class="nn">torch._six</span> <span class="kn">import</span> <span class="n">PY2</span>

<span class="n">Tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span>
<span class="kn">from</span> <span class="nn">torch</span> <span class="kn">import</span> <span class="n">_VF</span>

<span class="n">__all__</span> <span class="o">=</span> <span class="p">[</span>
    <span class="s1">&#39;align_tensors&#39;</span><span class="p">,</span>
    <span class="s1">&#39;broadcast_tensors&#39;</span><span class="p">,</span>
    <span class="s1">&#39;cartesian_prod&#39;</span><span class="p">,</span>
    <span class="s1">&#39;cdist&#39;</span><span class="p">,</span>
    <span class="s1">&#39;chain_matmul&#39;</span><span class="p">,</span>
    <span class="s1">&#39;einsum&#39;</span><span class="p">,</span>
    <span class="s1">&#39;lu&#39;</span><span class="p">,</span>
    <span class="s1">&#39;lu_unpack&#39;</span><span class="p">,</span>
    <span class="s1">&#39;norm&#39;</span><span class="p">,</span>
    <span class="s1">&#39;meshgrid&#39;</span><span class="p">,</span>
    <span class="s1">&#39;pca_lowrank&#39;</span><span class="p">,</span>
    <span class="s1">&#39;split&#39;</span><span class="p">,</span>
    <span class="s1">&#39;stft&#39;</span><span class="p">,</span>
    <span class="s1">&#39;svd_lowrank&#39;</span><span class="p">,</span>
    <span class="s1">&#39;tensordot&#39;</span><span class="p">,</span>
    <span class="s1">&#39;unique&#39;</span><span class="p">,</span>
    <span class="s1">&#39;unique_consecutive&#39;</span><span class="p">,</span>
<span class="p">]</span>


<div class="viewcode-block" id="broadcast_tensors"><a class="viewcode-back" href="../../torch.html#torch.broadcast_tensors">[docs]</a><span class="k">def</span> <span class="nf">broadcast_tensors</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;broadcast_tensors(*tensors) -&gt; List of Tensors</span>

<span class="sd">    Broadcasts the given tensors according to :ref:`broadcasting-semantics`.</span>

<span class="sd">    Args:</span>
<span class="sd">        *tensors: any number of tensors of the same type</span>

<span class="sd">    .. warning::</span>

<span class="sd">        More than one element of a broadcasted tensor may refer to a single</span>
<span class="sd">        memory location. As a result, in-place operations (especially ones that</span>
<span class="sd">        are vectorized) may result in incorrect behavior. If you need to write</span>
<span class="sd">        to the tensors, please clone them first.</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; x = torch.arange(3).view(1, 3)</span>
<span class="sd">        &gt;&gt;&gt; y = torch.arange(2).view(2, 1)</span>
<span class="sd">        &gt;&gt;&gt; a, b = torch.broadcast_tensors(x, y)</span>
<span class="sd">        &gt;&gt;&gt; a.size()</span>
<span class="sd">        torch.Size([2, 3])</span>
<span class="sd">        &gt;&gt;&gt; a</span>
<span class="sd">        tensor([[0, 1, 2],</span>
<span class="sd">                [0, 1, 2]])</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">broadcast_tensors</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">broadcast_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>


<div class="viewcode-block" id="split"><a class="viewcode-back" href="../../torch.html#torch.split">[docs]</a><span class="k">def</span> <span class="nf">split</span><span class="p">(</span><span class="n">tensor</span><span class="p">,</span> <span class="n">split_size_or_sections</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Splits the tensor into chunks. Each chunk is a view of the original tensor.</span>

<span class="sd">    If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will</span>
<span class="sd">    be split into equally sized chunks (if possible). Last chunk will be smaller if</span>
<span class="sd">    the tensor size along the given dimension :attr:`dim` is not divisible by</span>
<span class="sd">    :attr:`split_size`.</span>

<span class="sd">    If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split</span>
<span class="sd">    into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according</span>
<span class="sd">    to :attr:`split_size_or_sections`.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        tensor (Tensor): tensor to split.</span>
<span class="sd">        split_size_or_sections (int) or (list(int)): size of a single chunk or</span>
<span class="sd">            list of sizes for each chunk</span>
<span class="sd">        dim (int): dimension along which to split the tensor.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="n">tensor</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">split</span><span class="p">,</span> <span class="p">(</span><span class="n">tensor</span><span class="p">,),</span> <span class="n">tensor</span><span class="p">,</span> <span class="n">split_size_or_sections</span><span class="p">,</span>
                                         <span class="n">dim</span><span class="o">=</span><span class="n">dim</span><span class="p">)</span>
    <span class="c1"># Overwriting reason:</span>
    <span class="c1"># This dispatches to two ATen functions depending on the type of</span>
    <span class="c1"># split_size_or_sections. The branching code is in tensor.py, which we</span>
    <span class="c1"># call here.</span>
    <span class="k">return</span> <span class="n">tensor</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">split_size_or_sections</span><span class="p">,</span> <span class="n">dim</span><span class="p">)</span></div>

<span class="c1"># equivalent to itertools.product(indices)</span>
<span class="k">def</span> <span class="nf">_indices_product</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
    <span class="c1"># type: (List[int]) -&gt; (List[List[int]])</span>
    <span class="n">empty_list</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">annotate</span><span class="p">(</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="p">[])</span>
    <span class="n">result</span> <span class="o">=</span> <span class="p">[</span><span class="n">empty_list</span><span class="p">]</span>
    <span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">:</span>
        <span class="n">result_temp</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">annotate</span><span class="p">(</span><span class="n">List</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">int</span><span class="p">]],</span> <span class="p">[])</span>
        <span class="k">for</span> <span class="n">res</span> <span class="ow">in</span> <span class="n">result</span><span class="p">:</span>
            <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">idx</span><span class="p">):</span>
                <span class="n">result_temp</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">res</span> <span class="o">+</span> <span class="p">[</span><span class="n">i</span><span class="p">])</span>
        <span class="n">result</span> <span class="o">=</span> <span class="n">result_temp</span>
    <span class="k">return</span> <span class="n">result</span>

<span class="k">def</span> <span class="nf">_index_tensor_with_indices_list</span><span class="p">(</span><span class="n">tensor</span><span class="p">,</span> <span class="n">indices</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, List[int]) -&gt; Tensor</span>
    <span class="n">out</span> <span class="o">=</span> <span class="n">tensor</span>
    <span class="k">for</span> <span class="n">index</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">:</span>
        <span class="n">out</span> <span class="o">=</span> <span class="n">out</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
    <span class="k">return</span> <span class="n">out</span>

<div class="viewcode-block" id="lu_unpack"><a class="viewcode-back" href="../../torch.html#torch.lu_unpack">[docs]</a><span class="k">def</span> <span class="nf">lu_unpack</span><span class="p">(</span><span class="n">LU_data</span><span class="p">,</span> <span class="n">LU_pivots</span><span class="p">,</span> <span class="n">unpack_data</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">unpack_pivots</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, Tensor, bool, bool) -&gt;  (Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]])</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Unpacks the data and pivots from a LU factorization of a tensor.</span>

<span class="sd">    Returns a tuple of tensors as ``(the pivots, the L tensor, the U tensor)``.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        LU_data (Tensor): the packed LU factorization data</span>
<span class="sd">        LU_pivots (Tensor): the packed LU factorization pivots</span>
<span class="sd">        unpack_data (bool): flag indicating if the data should be unpacked</span>
<span class="sd">        unpack_pivots (bool): flag indicating if the pivots should be unpacked</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; A = torch.randn(2, 3, 3)</span>
<span class="sd">        &gt;&gt;&gt; A_LU, pivots = A.lu()</span>
<span class="sd">        &gt;&gt;&gt; P, A_L, A_U = torch.lu_unpack(A_LU, pivots)</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # can recover A from factorization</span>
<span class="sd">        &gt;&gt;&gt; A_ = torch.bmm(P, torch.bmm(A_L, A_U))</span>

<span class="sd">        &gt;&gt;&gt; # LU factorization of a rectangular matrix:</span>
<span class="sd">        &gt;&gt;&gt; A = torch.randn(2, 3, 2)</span>
<span class="sd">        &gt;&gt;&gt; A_LU, pivots = A.lu()</span>
<span class="sd">        &gt;&gt;&gt; P, A_L, A_U = torch.lu_unpack(A_LU, pivots)</span>
<span class="sd">        &gt;&gt;&gt; P</span>
<span class="sd">        tensor([[[1., 0., 0.],</span>
<span class="sd">                 [0., 1., 0.],</span>
<span class="sd">                 [0., 0., 1.]],</span>

<span class="sd">                [[0., 0., 1.],</span>
<span class="sd">                 [0., 1., 0.],</span>
<span class="sd">                 [1., 0., 0.]]])</span>
<span class="sd">        &gt;&gt;&gt; A_L</span>
<span class="sd">        tensor([[[ 1.0000,  0.0000],</span>
<span class="sd">                 [ 0.4763,  1.0000],</span>
<span class="sd">                 [ 0.3683,  0.1135]],</span>

<span class="sd">                [[ 1.0000,  0.0000],</span>
<span class="sd">                 [ 0.2957,  1.0000],</span>
<span class="sd">                 [-0.9668, -0.3335]]])</span>
<span class="sd">        &gt;&gt;&gt; A_U</span>
<span class="sd">        tensor([[[ 2.1962,  1.0881],</span>
<span class="sd">                 [ 0.0000, -0.8681]],</span>

<span class="sd">                [[-1.0947,  0.3736],</span>
<span class="sd">                 [ 0.0000,  0.5718]]])</span>
<span class="sd">        &gt;&gt;&gt; A_ = torch.bmm(P, torch.bmm(A_L, A_U))</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(A_ - A)</span>
<span class="sd">        tensor(2.9802e-08)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="n">tens_ops</span> <span class="o">=</span> <span class="p">(</span><span class="n">LU_data</span><span class="p">,</span> <span class="n">LU_pivots</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">any</span><span class="p">([</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tens_ops</span><span class="p">])</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tens_ops</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">lu_unpack</span><span class="p">,</span> <span class="n">tens_ops</span><span class="p">,</span> <span class="n">LU_data</span><span class="p">,</span> <span class="n">LU_pivots</span><span class="p">,</span> <span class="n">unpack_data</span><span class="o">=</span><span class="n">unpack_data</span><span class="p">,</span>
                <span class="n">unpack_pivots</span><span class="o">=</span><span class="n">unpack_pivots</span><span class="p">)</span>
    <span class="n">shape</span> <span class="o">=</span> <span class="n">LU_data</span><span class="o">.</span><span class="n">shape</span>
    <span class="c1"># In generalized LU factorization, the following shape relations hold:</span>
    <span class="c1">#   A.shape[-2:] == (m, n)</span>
    <span class="c1">#   P.shape[-2:] == (m, m)</span>
    <span class="c1">#   L.shape[-2:] == (m, k)</span>
    <span class="c1">#   U.shape[-2:] == (k, n)</span>
    <span class="c1"># where k = min(m, n)</span>
    <span class="n">m</span><span class="p">,</span> <span class="n">n</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="mi">2</span><span class="p">:]</span>
    <span class="n">k</span> <span class="o">=</span> <span class="nb">min</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="n">n</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">unpack_data</span><span class="p">:</span>
        <span class="n">U</span> <span class="o">=</span> <span class="n">LU_data</span><span class="o">.</span><span class="n">triu</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">m</span> <span class="o">!=</span> <span class="n">k</span><span class="p">:</span>
            <span class="n">U</span> <span class="o">=</span> <span class="n">U</span><span class="o">.</span><span class="n">narrow</span><span class="p">(</span><span class="o">-</span><span class="mi">2</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">k</span><span class="p">)</span>
        <span class="n">L</span> <span class="o">=</span> <span class="n">LU_data</span><span class="o">.</span><span class="n">tril</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">k</span> <span class="o">!=</span> <span class="n">n</span><span class="p">:</span>
            <span class="n">L</span> <span class="o">=</span> <span class="n">L</span><span class="o">.</span><span class="n">narrow</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">k</span><span class="p">)</span>
        <span class="n">L</span><span class="o">.</span><span class="n">diagonal</span><span class="p">(</span><span class="n">dim1</span><span class="o">=-</span><span class="mi">2</span><span class="p">,</span> <span class="n">dim2</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span><span class="o">.</span><span class="n">fill_</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="n">L</span> <span class="o">=</span> <span class="n">U</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="k">if</span> <span class="n">unpack_pivots</span><span class="p">:</span>
        <span class="n">LU_pivots_zero_idx</span> <span class="o">=</span> <span class="n">LU_pivots</span> <span class="o">-</span> <span class="mi">1</span>
        <span class="k">if</span> <span class="n">LU_data</span><span class="o">.</span><span class="n">dim</span><span class="p">()</span> <span class="o">&gt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="n">P</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">eye</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">LU_data</span><span class="o">.</span><span class="n">device</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">LU_data</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span> \
                     <span class="o">.</span><span class="n">expand</span><span class="p">(</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="p">(</span><span class="n">m</span><span class="p">,))</span> \
                     <span class="o">.</span><span class="n">clone</span><span class="p">(</span><span class="n">memory_format</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">contiguous_format</span><span class="p">)</span>

            <span class="c1"># TODO: rewrite when TorchScript supports product and map as</span>
            <span class="c1"># product(*map(lambda x: list(range(x)), shape[:-2])) when issue 33781 is fixed</span>
            <span class="n">indices</span> <span class="o">=</span> <span class="n">_indices_product</span><span class="p">(</span><span class="n">shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">2</span><span class="p">])</span>
            <span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">:</span>
                <span class="n">final_order</span> <span class="o">=</span> <span class="p">[</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">m</span><span class="p">)]</span>  <span class="c1"># noqa: C416 TODO: rewrite as list(range(m))</span>
                <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">_index_tensor_with_indices_list</span><span class="p">(</span><span class="n">LU_pivots_zero_idx</span><span class="p">,</span> <span class="n">idx</span><span class="p">)):</span>
                    <span class="n">final_order</span><span class="p">[</span><span class="n">k</span><span class="p">],</span> <span class="n">final_order</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">=</span> <span class="n">final_order</span><span class="p">[</span><span class="n">j</span><span class="p">],</span> <span class="n">final_order</span><span class="p">[</span><span class="n">k</span><span class="p">]</span>
                <span class="c1"># TODO: remove _index_tensor_with_indices_list when TorchScript supports indexing Tensor with list</span>
                <span class="n">p_idx</span> <span class="o">=</span> <span class="n">_index_tensor_with_indices_list</span><span class="p">(</span><span class="n">P</span><span class="p">,</span> <span class="n">idx</span><span class="p">)</span>
                <span class="n">p_idx</span><span class="o">.</span><span class="n">copy_</span><span class="p">(</span><span class="n">p_idx</span><span class="o">.</span><span class="n">index_select</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">as_tensor</span><span class="p">(</span><span class="n">final_order</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">LU_pivots</span><span class="o">.</span><span class="n">device</span><span class="p">)))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">P</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">eye</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">LU_data</span><span class="o">.</span><span class="n">device</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">LU_data</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
            <span class="n">final_order</span> <span class="o">=</span> <span class="p">[</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">m</span><span class="p">)]</span>  <span class="c1"># noqa: C416 TODO: rewrite as list(range(m))</span>
            <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">j</span><span class="p">,</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">LU_pivots_zero_idx</span><span class="p">):</span>
                <span class="n">final_order</span><span class="p">[</span><span class="n">k</span><span class="p">],</span> <span class="n">final_order</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">=</span> <span class="n">final_order</span><span class="p">[</span><span class="n">j</span><span class="p">],</span> <span class="n">final_order</span><span class="p">[</span><span class="n">k</span><span class="p">]</span>
            <span class="n">P</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">index_select</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">as_tensor</span><span class="p">(</span><span class="n">final_order</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">LU_pivots</span><span class="o">.</span><span class="n">device</span><span class="p">))</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="n">P</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="k">return</span> <span class="n">P</span><span class="p">,</span> <span class="n">L</span><span class="p">,</span> <span class="n">U</span></div>


<div class="viewcode-block" id="einsum"><a class="viewcode-back" href="../../torch.html#torch.einsum">[docs]</a><span class="k">def</span> <span class="nf">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;einsum(equation, *operands) -&gt; Tensor</span>

<span class="sd">This function provides a way of computing multilinear expressions (i.e. sums of products) using the</span>
<span class="sd">Einstein summation convention.</span>

<span class="sd">Args:</span>
<span class="sd">    equation (string): The equation is given in terms of lower case letters (indices) to be associated</span>
<span class="sd">           with each dimension of the operands and result. The left hand side lists the operands</span>
<span class="sd">           dimensions, separated by commas. There should be one index letter per tensor dimension.</span>
<span class="sd">           The right hand side follows after `-&gt;` and gives the indices for the output.</span>
<span class="sd">           If the `-&gt;` and right hand side are omitted, it implicitly defined as the alphabetically</span>
<span class="sd">           sorted list of all indices appearing exactly once in the left hand side.</span>
<span class="sd">           The indices not apprearing in the output are summed over after multiplying the operands</span>
<span class="sd">           entries.</span>
<span class="sd">           If an index appears several times for the same operand, a diagonal is taken.</span>
<span class="sd">           Ellipses `...` represent a fixed number of dimensions. If the right hand side is inferred,</span>
<span class="sd">           the ellipsis dimensions are at the beginning of the output.</span>
<span class="sd">    operands (Tensor): The operands to compute the Einstein sum of.</span>

<span class="sd">Examples::</span>

<span class="sd">    &gt;&gt;&gt; x = torch.randn(5)</span>
<span class="sd">    &gt;&gt;&gt; y = torch.randn(4)</span>
<span class="sd">    &gt;&gt;&gt; torch.einsum(&#39;i,j-&gt;ij&#39;, x, y)  # outer product</span>
<span class="sd">    tensor([[-0.0570, -0.0286, -0.0231,  0.0197],</span>
<span class="sd">            [ 1.2616,  0.6335,  0.5113, -0.4351],</span>
<span class="sd">            [ 1.4452,  0.7257,  0.5857, -0.4984],</span>
<span class="sd">            [-0.4647, -0.2333, -0.1883,  0.1603],</span>
<span class="sd">            [-1.1130, -0.5588, -0.4510,  0.3838]])</span>


<span class="sd">    &gt;&gt;&gt; A = torch.randn(3,5,4)</span>
<span class="sd">    &gt;&gt;&gt; l = torch.randn(2,5)</span>
<span class="sd">    &gt;&gt;&gt; r = torch.randn(2,4)</span>
<span class="sd">    &gt;&gt;&gt; torch.einsum(&#39;bn,anm,bm-&gt;ba&#39;, l, A, r) # compare torch.nn.functional.bilinear</span>
<span class="sd">    tensor([[-0.3430, -5.2405,  0.4494],</span>
<span class="sd">            [ 0.3311,  5.5201, -3.0356]])</span>


<span class="sd">    &gt;&gt;&gt; As = torch.randn(3,2,5)</span>
<span class="sd">    &gt;&gt;&gt; Bs = torch.randn(3,5,4)</span>
<span class="sd">    &gt;&gt;&gt; torch.einsum(&#39;bij,bjk-&gt;bik&#39;, As, Bs) # batch matrix multiplication</span>
<span class="sd">    tensor([[[-1.0564, -1.5904,  3.2023,  3.1271],</span>
<span class="sd">             [-1.6706, -0.8097, -0.8025, -2.1183]],</span>

<span class="sd">            [[ 4.2239,  0.3107, -0.5756, -0.2354],</span>
<span class="sd">             [-1.4558, -0.3460,  1.5087, -0.8530]],</span>

<span class="sd">            [[ 2.8153,  1.8787, -4.3839, -1.2112],</span>
<span class="sd">             [ 0.3728, -2.1131,  0.0921,  0.8305]]])</span>

<span class="sd">    &gt;&gt;&gt; A = torch.randn(3, 3)</span>
<span class="sd">    &gt;&gt;&gt; torch.einsum(&#39;ii-&gt;i&#39;, A) # diagonal</span>
<span class="sd">    tensor([-0.7825,  0.8291, -0.1936])</span>

<span class="sd">    &gt;&gt;&gt; A = torch.randn(4, 3, 3)</span>
<span class="sd">    &gt;&gt;&gt; torch.einsum(&#39;...ii-&gt;...i&#39;, A) # batch diagonal</span>
<span class="sd">    tensor([[-1.0864,  0.7292,  0.0569],</span>
<span class="sd">            [-0.9725, -1.0270,  0.6493],</span>
<span class="sd">            [ 0.5832, -1.1716, -1.5084],</span>
<span class="sd">            [ 0.4041, -1.1690,  0.8570]])</span>

<span class="sd">    &gt;&gt;&gt; A = torch.randn(2, 3, 4, 5)</span>
<span class="sd">    &gt;&gt;&gt; torch.einsum(&#39;...ij-&gt;...ji&#39;, A).shape # batch permute</span>
<span class="sd">    torch.Size([2, 3, 5, 4])</span>
<span class="sd">&quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">operands</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">operands</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">einsum</span><span class="p">,</span> <span class="n">operands</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">)</span>
    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">operands</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span> <span class="ow">and</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">operands</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
        <span class="c1"># the old interface of passing the operands as one list argument</span>
        <span class="n">operands</span> <span class="o">=</span> <span class="n">operands</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="n">operands</span><span class="p">)</span></div>


<div class="viewcode-block" id="meshgrid"><a class="viewcode-back" href="../../torch.html#torch.meshgrid">[docs]</a><span class="k">def</span> <span class="nf">meshgrid</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Take :math:`N` tensors, each of which can be either scalar or 1-dimensional</span>
<span class="sd">vector, and create :math:`N` N-dimensional grids, where the :math:`i` :sup:`th` grid is defined by</span>
<span class="sd">expanding the :math:`i` :sup:`th` input over dimensions defined by other inputs.</span>


<span class="sd">    Args:</span>
<span class="sd">        tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be</span>
<span class="sd">        treated as tensors of size :math:`(1,)` automatically</span>

<span class="sd">    Returns:</span>
<span class="sd">        seq (sequence of Tensors): If the input has :math:`k` tensors of size</span>
<span class="sd">        :math:`(N_1,), (N_2,), \ldots , (N_k,)`, then the output would also have :math:`k` tensors,</span>
<span class="sd">        where all tensors are of size :math:`(N_1, N_2, \ldots , N_k)`.</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; x = torch.tensor([1, 2, 3])</span>
<span class="sd">        &gt;&gt;&gt; y = torch.tensor([4, 5, 6])</span>
<span class="sd">        &gt;&gt;&gt; grid_x, grid_y = torch.meshgrid(x, y)</span>
<span class="sd">        &gt;&gt;&gt; grid_x</span>
<span class="sd">        tensor([[1, 1, 1],</span>
<span class="sd">                [2, 2, 2],</span>
<span class="sd">                [3, 3, 3]])</span>
<span class="sd">        &gt;&gt;&gt; grid_y</span>
<span class="sd">        tensor([[4, 5, 6],</span>
<span class="sd">                [4, 5, 6],</span>
<span class="sd">                [4, 5, 6]])</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">meshgrid</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span> <span class="ow">and</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">tensors</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
        <span class="c1"># the old interface of passing the operands as one list argument</span>
        <span class="n">tensors</span> <span class="o">=</span> <span class="n">tensors</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">meshgrid</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>


<div class="viewcode-block" id="stft"><a class="viewcode-back" href="../../torch.html#torch.stft">[docs]</a><span class="k">def</span> <span class="nf">stft</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">,</span> <span class="n">hop_length</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">win_length</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">window</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
         <span class="n">center</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;reflect&#39;</span><span class="p">,</span> <span class="n">normalized</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">onesided</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, int, Optional[int], Optional[int], Optional[Tensor], bool, str, bool, bool) -&gt; Tensor</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Short-time Fourier transform (STFT).</span>

<span class="sd">    Ignoring the optional batch dimension, this method computes the following</span>
<span class="sd">    expression:</span>

<span class="sd">    .. math::</span>
<span class="sd">        X[m, \omega] = \sum_{k = 0}^{\text{win\_length-1}}%</span>
<span class="sd">                            \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %</span>
<span class="sd">                            \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{win\_length}}\right),</span>

<span class="sd">    where :math:`m` is the index of the sliding window, and :math:`\omega` is</span>
<span class="sd">    the frequency that :math:`0 \leq \omega &lt; \text{n\_fft}`. When</span>
<span class="sd">    :attr:`onesided` is the default value ``True``,</span>

<span class="sd">    * :attr:`input` must be either a 1-D time sequence or a 2-D batch of time</span>
<span class="sd">      sequences.</span>

<span class="sd">    * If :attr:`hop_length` is ``None`` (default), it is treated as equal to</span>
<span class="sd">      ``floor(n_fft / 4)``.</span>

<span class="sd">    * If :attr:`win_length` is ``None`` (default), it is treated as equal to</span>
<span class="sd">      :attr:`n_fft`.</span>

<span class="sd">    * :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from</span>
<span class="sd">      :meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is</span>
<span class="sd">      treated as if having :math:`1` everywhere in the window. If</span>
<span class="sd">      :math:`\text{win\_length} &lt; \text{n\_fft}`, :attr:`window` will be padded on</span>
<span class="sd">      both sides to length :attr:`n_fft` before being applied.</span>

<span class="sd">    * If :attr:`center` is ``True`` (default), :attr:`input` will be padded on</span>
<span class="sd">      both sides so that the :math:`t`-th frame is centered at time</span>
<span class="sd">      :math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame</span>
<span class="sd">      begins at time  :math:`t \times \text{hop\_length}`.</span>

<span class="sd">    * :attr:`pad_mode` determines the padding method used on :attr:`input` when</span>
<span class="sd">      :attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for</span>
<span class="sd">      all available options. Default is ``&quot;reflect&quot;``.</span>

<span class="sd">    * If :attr:`onesided` is ``True`` (default), only values for :math:`\omega`</span>
<span class="sd">      in :math:`\left[0, 1, 2, \dots, \left\lfloor \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]`</span>
<span class="sd">      are returned because the real-to-complex Fourier transform satisfies the</span>
<span class="sd">      conjugate symmetry, i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`.</span>

<span class="sd">    * If :attr:`normalized` is ``True`` (default is ``False``), the function</span>
<span class="sd">      returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`.</span>

<span class="sd">    Returns the real and the imaginary parts together as one tensor of size</span>
<span class="sd">    :math:`(* \times N \times T \times 2)`, where :math:`*` is the optional</span>
<span class="sd">    batch size of :attr:`input`, :math:`N` is the number of frequencies where</span>
<span class="sd">    STFT is applied, :math:`T` is the total number of frames used, and each pair</span>
<span class="sd">    in the last dimension represents a complex number as the real part and the</span>
<span class="sd">    imaginary part.</span>

<span class="sd">    .. warning::</span>
<span class="sd">      This function changed signature at version 0.4.1. Calling with the</span>
<span class="sd">      previous signature may cause error or return incorrect result.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        input (Tensor): the input tensor</span>
<span class="sd">        n_fft (int): size of Fourier transform</span>
<span class="sd">        hop_length (int, optional): the distance between neighboring sliding window</span>
<span class="sd">            frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``)</span>
<span class="sd">        win_length (int, optional): the size of window frame and STFT filter.</span>
<span class="sd">            Default: ``None``  (treated as equal to :attr:`n_fft`)</span>
<span class="sd">        window (Tensor, optional): the optional window function.</span>
<span class="sd">            Default: ``None`` (treated as window of all :math:`1` s)</span>
<span class="sd">        center (bool, optional): whether to pad :attr:`input` on both sides so</span>
<span class="sd">            that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.</span>
<span class="sd">            Default: ``True``</span>
<span class="sd">        pad_mode (string, optional): controls the padding method used when</span>
<span class="sd">            :attr:`center` is ``True``. Default: ``&quot;reflect&quot;``</span>
<span class="sd">        normalized (bool, optional): controls whether to return the normalized STFT results</span>
<span class="sd">             Default: ``False``</span>
<span class="sd">        onesided (bool, optional): controls whether to return half of results to</span>
<span class="sd">            avoid redundancy Default: ``True``</span>

<span class="sd">    Returns:</span>
<span class="sd">        Tensor: A tensor containing the STFT result with shape described above</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="nb">input</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">stft</span><span class="p">,</span> <span class="p">(</span><span class="nb">input</span><span class="p">,),</span> <span class="nb">input</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">,</span> <span class="n">hop_length</span><span class="o">=</span><span class="n">hop_length</span><span class="p">,</span> <span class="n">win_length</span><span class="o">=</span><span class="n">win_length</span><span class="p">,</span>
                <span class="n">window</span><span class="o">=</span><span class="n">window</span><span class="p">,</span> <span class="n">center</span><span class="o">=</span><span class="n">center</span><span class="p">,</span> <span class="n">pad_mode</span><span class="o">=</span><span class="n">pad_mode</span><span class="p">,</span> <span class="n">normalized</span><span class="o">=</span><span class="n">normalized</span><span class="p">,</span>
                <span class="n">onesided</span><span class="o">=</span><span class="n">onesided</span><span class="p">)</span>
    <span class="c1"># TODO: after having proper ways to map Python strings to ATen Enum, move</span>
    <span class="c1">#       this and F.pad to ATen.</span>
    <span class="k">if</span> <span class="n">center</span><span class="p">:</span>
        <span class="n">signal_dim</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">dim</span><span class="p">()</span>
        <span class="n">extended_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="p">(</span><span class="mi">3</span> <span class="o">-</span> <span class="n">signal_dim</span><span class="p">)</span> <span class="o">+</span> <span class="nb">list</span><span class="p">(</span><span class="nb">input</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
        <span class="n">pad</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">n_fft</span> <span class="o">//</span> <span class="mi">2</span><span class="p">)</span>
        <span class="nb">input</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">pad</span><span class="p">(</span><span class="nb">input</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="n">extended_shape</span><span class="p">),</span> <span class="p">(</span><span class="n">pad</span><span class="p">,</span> <span class="n">pad</span><span class="p">),</span> <span class="n">pad_mode</span><span class="p">)</span>
        <span class="nb">input</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="nb">input</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="o">-</span><span class="n">signal_dim</span><span class="p">:])</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">stft</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">n_fft</span><span class="p">,</span> <span class="n">hop_length</span><span class="p">,</span> <span class="n">win_length</span><span class="p">,</span> <span class="n">window</span><span class="p">,</span> <span class="n">normalized</span><span class="p">,</span> <span class="n">onesided</span><span class="p">)</span></div>


<span class="k">del</span> <span class="n">torch</span><span class="o">.</span><span class="n">unique_dim</span>


<div class="viewcode-block" id="unique"><a class="viewcode-back" href="../../torch.html#torch.unique">[docs]</a><span class="k">def</span> <span class="nf">unique</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="nb">sorted</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">return_inverse</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">return_counts</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Returns the unique elements of the input tensor.</span>

<span class="sd">    .. note:: This function is different from :func:`torch.unique_consecutive` in the sense that</span>
<span class="sd">        this function also eliminates non-consecutive duplicate values.</span>

<span class="sd">    .. note:: Currently in the CUDA implementation and the CPU implementation when dim is specified,</span>
<span class="sd">        `torch.unique` always sorts the tensor at the beginning regardless of the `sorted` argument.</span>
<span class="sd">        Sorting could be slow, so if your input tensor is already sorted, it is recommended to use</span>
<span class="sd">        :func:`torch.unique_consecutive` which avoids the sorting.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        input (Tensor): the input tensor</span>
<span class="sd">        sorted (bool): Whether to sort the unique elements in ascending order</span>
<span class="sd">            before returning as output.</span>
<span class="sd">        return_inverse (bool): Whether to also return the indices for where</span>
<span class="sd">            elements in the original input ended up in the returned unique list.</span>
<span class="sd">        return_counts (bool): Whether to also return the counts for each unique</span>
<span class="sd">            element.</span>
<span class="sd">        dim (int): the dimension to apply unique. If ``None``, the unique of the</span>
<span class="sd">            flattened input is returned. default: ``None``</span>

<span class="sd">    Returns:</span>
<span class="sd">        (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing</span>

<span class="sd">            - **output** (*Tensor*): the output list of unique scalar elements.</span>
<span class="sd">            - **inverse_indices** (*Tensor*): (optional) if</span>
<span class="sd">              :attr:`return_inverse` is True, there will be an additional</span>
<span class="sd">              returned tensor (same shape as input) representing the indices</span>
<span class="sd">              for where elements in the original input map to in the output;</span>
<span class="sd">              otherwise, this function will only return a single tensor.</span>
<span class="sd">            - **counts** (*Tensor*): (optional) if</span>
<span class="sd">              :attr:`return_counts` is True, there will be an additional</span>
<span class="sd">              returned tensor (same shape as output or output.size(dim),</span>
<span class="sd">              if dim was specified) representing the number of occurrences</span>
<span class="sd">              for each unique value or tensor.</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))</span>
<span class="sd">        &gt;&gt;&gt; output</span>
<span class="sd">        tensor([ 2,  3,  1])</span>

<span class="sd">        &gt;&gt;&gt; output, inverse_indices = torch.unique(</span>
<span class="sd">                torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)</span>
<span class="sd">        &gt;&gt;&gt; output</span>
<span class="sd">        tensor([ 1,  2,  3])</span>
<span class="sd">        &gt;&gt;&gt; inverse_indices</span>
<span class="sd">        tensor([ 0,  2,  1,  2])</span>

<span class="sd">        &gt;&gt;&gt; output, inverse_indices = torch.unique(</span>
<span class="sd">                torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)</span>
<span class="sd">        &gt;&gt;&gt; output</span>
<span class="sd">        tensor([ 1,  2,  3])</span>
<span class="sd">        &gt;&gt;&gt; inverse_indices</span>
<span class="sd">        tensor([[ 0,  2],</span>
<span class="sd">                [ 1,  2]])</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="nb">input</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">unique</span><span class="p">,</span> <span class="p">(</span><span class="nb">input</span><span class="p">,),</span> <span class="nb">input</span><span class="p">,</span> <span class="nb">sorted</span><span class="o">=</span><span class="nb">sorted</span><span class="p">,</span> <span class="n">return_inverse</span><span class="o">=</span><span class="n">return_inverse</span><span class="p">,</span>
                <span class="n">return_counts</span><span class="o">=</span><span class="n">return_counts</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="n">dim</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">dim</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span><span class="p">,</span> <span class="n">counts</span> <span class="o">=</span> <span class="n">_VF</span><span class="o">.</span><span class="n">unique_dim</span><span class="p">(</span>
            <span class="nb">input</span><span class="p">,</span>
            <span class="n">dim</span><span class="p">,</span>
            <span class="nb">sorted</span><span class="o">=</span><span class="nb">sorted</span><span class="p">,</span>
            <span class="n">return_inverse</span><span class="o">=</span><span class="n">return_inverse</span><span class="p">,</span>
            <span class="n">return_counts</span><span class="o">=</span><span class="n">return_counts</span><span class="p">,</span>
        <span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span><span class="p">,</span> <span class="n">counts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_unique2</span><span class="p">(</span>
            <span class="nb">input</span><span class="p">,</span>
            <span class="nb">sorted</span><span class="o">=</span><span class="nb">sorted</span><span class="p">,</span>
            <span class="n">return_inverse</span><span class="o">=</span><span class="n">return_inverse</span><span class="p">,</span>
            <span class="n">return_counts</span><span class="o">=</span><span class="n">return_counts</span><span class="p">,</span>
        <span class="p">)</span>
    <span class="k">if</span> <span class="n">return_inverse</span> <span class="ow">and</span> <span class="n">return_counts</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span><span class="p">,</span> <span class="n">counts</span>
    <span class="k">elif</span> <span class="n">return_inverse</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span>
    <span class="k">elif</span> <span class="n">return_counts</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span><span class="p">,</span> <span class="n">counts</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span></div>


<div class="viewcode-block" id="unique_consecutive"><a class="viewcode-back" href="../../torch.html#torch.unique_consecutive">[docs]</a><span class="k">def</span> <span class="nf">unique_consecutive</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">return_inverse</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">return_counts</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Eliminates all but the first element from every consecutive group of equivalent elements.</span>

<span class="sd">    .. note:: This function is different from :func:`torch.unique` in the sense that this function</span>
<span class="sd">        only eliminates consecutive duplicate values. This semantics is similar to `std::unique`</span>
<span class="sd">        in C++.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        input (Tensor): the input tensor</span>
<span class="sd">        return_inverse (bool): Whether to also return the indices for where</span>
<span class="sd">            elements in the original input ended up in the returned unique list.</span>
<span class="sd">        return_counts (bool): Whether to also return the counts for each unique</span>
<span class="sd">            element.</span>
<span class="sd">        dim (int): the dimension to apply unique. If ``None``, the unique of the</span>
<span class="sd">            flattened input is returned. default: ``None``</span>

<span class="sd">    Returns:</span>
<span class="sd">        (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing</span>

<span class="sd">            - **output** (*Tensor*): the output list of unique scalar elements.</span>
<span class="sd">            - **inverse_indices** (*Tensor*): (optional) if</span>
<span class="sd">              :attr:`return_inverse` is True, there will be an additional</span>
<span class="sd">              returned tensor (same shape as input) representing the indices</span>
<span class="sd">              for where elements in the original input map to in the output;</span>
<span class="sd">              otherwise, this function will only return a single tensor.</span>
<span class="sd">            - **counts** (*Tensor*): (optional) if</span>
<span class="sd">              :attr:`return_counts` is True, there will be an additional</span>
<span class="sd">              returned tensor (same shape as output or output.size(dim),</span>
<span class="sd">              if dim was specified) representing the number of occurrences</span>
<span class="sd">              for each unique value or tensor.</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])</span>
<span class="sd">        &gt;&gt;&gt; output = torch.unique_consecutive(x)</span>
<span class="sd">        &gt;&gt;&gt; output</span>
<span class="sd">        tensor([1, 2, 3, 1, 2])</span>

<span class="sd">        &gt;&gt;&gt; output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)</span>
<span class="sd">        &gt;&gt;&gt; output</span>
<span class="sd">        tensor([1, 2, 3, 1, 2])</span>
<span class="sd">        &gt;&gt;&gt; inverse_indices</span>
<span class="sd">        tensor([0, 0, 1, 1, 2, 3, 3, 4])</span>

<span class="sd">        &gt;&gt;&gt; output, counts = torch.unique_consecutive(x, return_counts=True)</span>
<span class="sd">        &gt;&gt;&gt; output</span>
<span class="sd">        tensor([1, 2, 3, 1, 2])</span>
<span class="sd">        &gt;&gt;&gt; counts</span>
<span class="sd">        tensor([2, 2, 1, 2, 1])</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="nb">input</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">unique_consecutive</span><span class="p">,</span> <span class="p">(</span><span class="nb">input</span><span class="p">,),</span> <span class="nb">input</span><span class="p">,</span> <span class="n">return_inverse</span><span class="o">=</span><span class="n">return_inverse</span><span class="p">,</span>
                <span class="n">return_counts</span><span class="o">=</span><span class="n">return_counts</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="n">dim</span><span class="p">)</span>
    <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span><span class="p">,</span> <span class="n">counts</span> <span class="o">=</span> <span class="n">_VF</span><span class="o">.</span><span class="n">unique_consecutive</span><span class="p">(</span>
        <span class="nb">input</span><span class="p">,</span> <span class="n">return_inverse</span><span class="o">=</span><span class="n">return_inverse</span><span class="p">,</span> <span class="n">return_counts</span><span class="o">=</span><span class="n">return_counts</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="n">dim</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">return_inverse</span> <span class="ow">and</span> <span class="n">return_counts</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span><span class="p">,</span> <span class="n">counts</span>
    <span class="k">if</span> <span class="n">return_inverse</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span><span class="p">,</span> <span class="n">inverse_indices</span>
    <span class="k">if</span> <span class="n">return_counts</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">output</span><span class="p">,</span> <span class="n">counts</span>
    <span class="k">return</span> <span class="n">output</span></div>


<div class="viewcode-block" id="tensordot"><a class="viewcode-back" href="../../torch.html#torch.tensordot">[docs]</a><span class="k">def</span> <span class="nf">tensordot</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">dims</span><span class="o">=</span><span class="mi">2</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Returns a contraction of a and b over multiple dimensions.</span>

<span class="sd">    :attr:`tensordot` implements a generalized matrix product.</span>

<span class="sd">    Args:</span>
<span class="sd">      a (Tensor): Left tensor to contract</span>
<span class="sd">      b (Tensor): Right tensor to contract</span>
<span class="sd">      dims (int or tuple of two lists of integers): number of dimensions to</span>
<span class="sd">         contract or explicit lists of dimensions for :attr:`a` and</span>
<span class="sd">         :attr:`b` respectively</span>

<span class="sd">    When called with a non-negative integer argument :attr:`dims` = :math:`d`, and</span>
<span class="sd">    the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,</span>
<span class="sd">    respectively, :func:`~torch.tensordot` computes</span>

<span class="sd">    .. math::</span>
<span class="sd">        r_{i_0,...,i_{m-d}, i_d,...,i_n}</span>
<span class="sd">          = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.</span>

<span class="sd">    When called with :attr:`dims` of the list form, the given dimensions will be contracted</span>
<span class="sd">    in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes</span>
<span class="sd">    in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted</span>
<span class="sd">    dimensions.</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; a = torch.arange(60.).reshape(3, 4, 5)</span>
<span class="sd">        &gt;&gt;&gt; b = torch.arange(24.).reshape(4, 3, 2)</span>
<span class="sd">        &gt;&gt;&gt; torch.tensordot(a, b, dims=([1, 0], [0, 1]))</span>
<span class="sd">        tensor([[4400., 4730.],</span>
<span class="sd">                [4532., 4874.],</span>
<span class="sd">                [4664., 5018.],</span>
<span class="sd">                [4796., 5162.],</span>
<span class="sd">                [4928., 5306.]])</span>

<span class="sd">        &gt;&gt;&gt; a = torch.randn(3, 4, 5, device=&#39;cuda&#39;)</span>
<span class="sd">        &gt;&gt;&gt; b = torch.randn(4, 5, 6, device=&#39;cuda&#39;)</span>
<span class="sd">        &gt;&gt;&gt; c = torch.tensordot(a, b, dims=2).cpu()</span>
<span class="sd">        tensor([[ 8.3504, -2.5436,  6.2922,  2.7556, -1.0732,  3.2741],</span>
<span class="sd">                [ 3.3161,  0.0704,  5.0187, -0.4079, -4.3126,  4.8744],</span>
<span class="sd">                [ 0.8223,  3.9445,  3.2168, -0.2400,  3.4117,  1.7780]])</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">a</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">or</span> <span class="nb">type</span><span class="p">(</span><span class="n">b</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">tensordot</span><span class="p">,</span> <span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">),</span> <span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">dims</span><span class="o">=</span><span class="n">dims</span><span class="p">)</span>
    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">dims</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">))</span> <span class="ow">or</span> \
       <span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">dims</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">dims</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">&gt;</span> <span class="mi">1</span><span class="p">):</span>
        <span class="n">dims_a</span><span class="p">,</span> <span class="n">dims_b</span> <span class="o">=</span> <span class="n">dims</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">dims</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">):</span>
            <span class="n">dims</span> <span class="o">=</span> <span class="n">dims</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">dims</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;tensordot expects dims &gt;= 0, but got dims=</span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">dims</span><span class="p">))</span>
        <span class="n">dims_a</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="o">-</span><span class="n">dims</span><span class="p">,</span> <span class="mi">0</span><span class="p">))</span>
        <span class="n">dims_b</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="n">dims</span><span class="p">))</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">tensordot</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">dims_a</span><span class="p">,</span> <span class="n">dims_b</span><span class="p">)</span></div>


<div class="viewcode-block" id="cartesian_prod"><a class="viewcode-back" href="../../torch.html#torch.cartesian_prod">[docs]</a><span class="k">def</span> <span class="nf">cartesian_prod</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Do cartesian product of the given sequence of tensors. The behavior is similar to</span>
<span class="sd">    python&#39;s `itertools.product`.</span>

<span class="sd">    Arguments:</span>
<span class="sd">        *tensors: any number of 1 dimensional tensors.</span>

<span class="sd">    Returns:</span>
<span class="sd">        Tensor: A tensor equivalent to converting all the input tensors into lists,</span>
<span class="sd">            do `itertools.product` on these lists, and finally convert the resulting list</span>
<span class="sd">            into tensor.</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; a = [1, 2, 3]</span>
<span class="sd">        &gt;&gt;&gt; b = [4, 5]</span>
<span class="sd">        &gt;&gt;&gt; list(itertools.product(a, b))</span>
<span class="sd">        [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]</span>
<span class="sd">        &gt;&gt;&gt; tensor_a = torch.tensor(a)</span>
<span class="sd">        &gt;&gt;&gt; tensor_b = torch.tensor(b)</span>
<span class="sd">        &gt;&gt;&gt; torch.cartesian_prod(tensor_a, tensor_b)</span>
<span class="sd">        tensor([[1, 4],</span>
<span class="sd">                [1, 5],</span>
<span class="sd">                [2, 4],</span>
<span class="sd">                [2, 5],</span>
<span class="sd">                [3, 4],</span>
<span class="sd">                [3, 5]])</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">cartesian_prod</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cartesian_prod</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>


<div class="viewcode-block" id="cdist"><a class="viewcode-back" href="../../torch.html#torch.cdist">[docs]</a><span class="k">def</span> <span class="nf">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="mf">2.</span><span class="p">,</span> <span class="n">compute_mode</span><span class="o">=</span><span class="s1">&#39;use_mm_for_euclid_dist_if_necessary&#39;</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, Tensor, float, str) -&gt; (Tensor)</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Computes the batched p-norm distance between each pair of the two collections of row vectors.</span>

<span class="sd">    Args:</span>
<span class="sd">        x1 (Tensor): input tensor of shape :math:`B \times P \times M`.</span>
<span class="sd">        x2 (Tensor): input tensor of shape :math:`B \times R \times M`.</span>
<span class="sd">        p: p value for the p-norm distance to calculate between each vector pair</span>
<span class="sd">            :math:`\in [0, \infty]`.</span>
<span class="sd">        compute_mode:</span>
<span class="sd">            &#39;use_mm_for_euclid_dist_if_necessary&#39; - will use matrix multiplication approach to calculate</span>
<span class="sd">            euclidean distance (p = 2) if P &gt; 25 or R &gt; 25</span>
<span class="sd">            &#39;use_mm_for_euclid_dist&#39; - will always use matrix multiplication approach to calculate</span>
<span class="sd">            euclidean distance (p = 2)</span>
<span class="sd">            &#39;donot_use_mm_for_euclid_dist&#39; - will never use matrix multiplication approach to calculate</span>
<span class="sd">            euclidean distance (p = 2)</span>
<span class="sd">            Default: use_mm_for_euclid_dist_if_necessary.</span>

<span class="sd">    If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the</span>
<span class="sd">    output will have shape :math:`B \times P \times R`.</span>

<span class="sd">    This function is equivalent to `scipy.spatial.distance.cdist(input, &#39;minkowski&#39;, p=p)`</span>
<span class="sd">    if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to</span>
<span class="sd">    `scipy.spatial.distance.cdist(input, &#39;hamming&#39;) * M`. When :math:`p = \infty`, the closest</span>
<span class="sd">    scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.</span>

<span class="sd">    Example:</span>

<span class="sd">        &gt;&gt;&gt; a = torch.tensor([[0.9041,  0.0196], [-0.3108, -2.4423], [-0.4821,  1.059]])</span>
<span class="sd">        &gt;&gt;&gt; a</span>
<span class="sd">        tensor([[ 0.9041,  0.0196],</span>
<span class="sd">                [-0.3108, -2.4423],</span>
<span class="sd">                [-0.4821,  1.0590]])</span>
<span class="sd">        &gt;&gt;&gt; b = torch.tensor([[-2.1763, -0.4713], [-0.6986,  1.3702]])</span>
<span class="sd">        &gt;&gt;&gt; b</span>
<span class="sd">        tensor([[-2.1763, -0.4713],</span>
<span class="sd">                [-0.6986,  1.3702]])</span>
<span class="sd">        &gt;&gt;&gt; torch.cdist(a, b, p=2)</span>
<span class="sd">        tensor([[3.1193, 2.0959],</span>
<span class="sd">                [2.7138, 3.8322],</span>
<span class="sd">                [2.2830, 0.3791]])</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">x1</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">or</span> <span class="nb">type</span><span class="p">(</span><span class="n">x2</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">cdist</span><span class="p">,</span> <span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">),</span> <span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="n">p</span><span class="p">,</span> <span class="n">compute_mode</span><span class="o">=</span><span class="n">compute_mode</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">compute_mode</span> <span class="o">==</span> <span class="s1">&#39;use_mm_for_euclid_dist_if_necessary&#39;</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
    <span class="k">elif</span> <span class="n">compute_mode</span> <span class="o">==</span> <span class="s1">&#39;use_mm_for_euclid_dist&#39;</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
    <span class="k">elif</span> <span class="n">compute_mode</span> <span class="o">==</span> <span class="s1">&#39;donot_use_mm_for_euclid_dist&#39;</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;</span><span class="si">{}</span><span class="s2"> is not a valid value for compute_mode&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">compute_mode</span><span class="p">))</span></div>

<span class="c1"># TODO: type dim as BroadcastingList when https://github.com/pytorch/pytorch/issues/33782 is fixed</span>
<span class="nd">@overload</span>  <span class="c1"># noqa: 749</span>
<span class="k">def</span> <span class="nf">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="s2">&quot;fro&quot;</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>  <span class="c1"># noqa: 749</span>
    <span class="c1"># type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -&gt; Tensor</span>
    <span class="k">pass</span>

<span class="nd">@overload</span>  <span class="c1"># noqa: 749</span>
<span class="k">def</span> <span class="nf">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="s2">&quot;fro&quot;</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>  <span class="c1"># noqa: 749</span>
    <span class="c1"># type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -&gt; Tensor</span>
    <span class="k">pass</span>

<span class="nd">@overload</span>  <span class="c1"># noqa: 749</span>
<span class="k">def</span> <span class="nf">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="s2">&quot;fro&quot;</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>  <span class="c1"># noqa: 749</span>
    <span class="c1"># type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -&gt; Tensor</span>
    <span class="k">pass</span>

<span class="nd">@overload</span>  <span class="c1"># noqa: 749</span>
<span class="k">def</span> <span class="nf">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="s2">&quot;fro&quot;</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>  <span class="c1"># noqa: 749</span>
    <span class="c1"># type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -&gt; Tensor</span>
    <span class="k">pass</span>

<div class="viewcode-block" id="norm"><a class="viewcode-back" href="../../torch.html#torch.norm">[docs]</a><span class="k">def</span> <span class="nf">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="s2">&quot;fro&quot;</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>  <span class="c1"># noqa: 749</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Returns the matrix norm or vector norm of a given tensor.</span>

<span class="sd">    Args:</span>
<span class="sd">        input (Tensor): the input tensor</span>
<span class="sd">        p (int, float, inf, -inf, &#39;fro&#39;, &#39;nuc&#39;, optional): the order of norm. Default: ``&#39;fro&#39;``</span>
<span class="sd">            The following norms can be calculated:</span>

<span class="sd">            =====  ============================  ==========================</span>
<span class="sd">            ord    matrix norm                   vector norm</span>
<span class="sd">            =====  ============================  ==========================</span>
<span class="sd">            None   Frobenius norm                2-norm</span>
<span class="sd">            &#39;fro&#39;  Frobenius norm                --</span>
<span class="sd">            &#39;nuc&#39;  nuclear norm                  --</span>
<span class="sd">            Other  as vec norm when dim is None  sum(abs(x)**ord)**(1./ord)</span>
<span class="sd">            =====  ============================  ==========================</span>

<span class="sd">        dim (int, 2-tuple of ints, 2-list of ints, optional): If it is an int,</span>
<span class="sd">            vector norm will be calculated, if it is 2-tuple of ints, matrix norm</span>
<span class="sd">            will be calculated. If the value is None, matrix norm will be calculated</span>
<span class="sd">            when the input tensor only has two dimensions, vector norm will be</span>
<span class="sd">            calculated when the input tensor only has one dimension. If the input</span>
<span class="sd">            tensor has more than two dimensions, the vector norm will be applied to</span>
<span class="sd">            last dimension.</span>
<span class="sd">        keepdim (bool, optional): whether the output tensors have :attr:`dim`</span>
<span class="sd">            retained or not. Ignored if :attr:`dim` = ``None`` and</span>
<span class="sd">            :attr:`out` = ``None``. Default: ``False``</span>
<span class="sd">        out (Tensor, optional): the output tensor. Ignored if</span>
<span class="sd">            :attr:`dim` = ``None`` and :attr:`out` = ``None``.</span>
<span class="sd">        dtype (:class:`torch.dtype`, optional): the desired data type of</span>
<span class="sd">            returned tensor. If specified, the input tensor is casted to</span>
<span class="sd">            :attr:`dtype` while performing the operation. Default: None.</span>


<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; import torch</span>
<span class="sd">        &gt;&gt;&gt; a = torch.arange(9, dtype= torch.float) - 4</span>
<span class="sd">        &gt;&gt;&gt; b = a.reshape((3, 3))</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(a)</span>
<span class="sd">        tensor(7.7460)</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(b)</span>
<span class="sd">        tensor(7.7460)</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(a, float(&#39;inf&#39;))</span>
<span class="sd">        tensor(4.)</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(b, float(&#39;inf&#39;))</span>
<span class="sd">        tensor(4.)</span>
<span class="sd">        &gt;&gt;&gt; c = torch.tensor([[ 1, 2, 3],[-1, 1, 4]] , dtype= torch.float)</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(c, dim=0)</span>
<span class="sd">        tensor([1.4142, 2.2361, 5.0000])</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(c, dim=1)</span>
<span class="sd">        tensor([3.7417, 4.2426])</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(c, p=1, dim=1)</span>
<span class="sd">        tensor([6., 6.])</span>
<span class="sd">        &gt;&gt;&gt; d = torch.arange(8, dtype= torch.float).reshape(2,2,2)</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(d, dim=(1,2))</span>
<span class="sd">        tensor([ 3.7417, 11.2250])</span>
<span class="sd">        &gt;&gt;&gt; torch.norm(d[0, :, :]), torch.norm(d[1, :, :])</span>
<span class="sd">        (tensor(3.7417), tensor(11.2250))</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="nb">input</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">norm</span><span class="p">,</span> <span class="p">(</span><span class="nb">input</span><span class="p">,),</span> <span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="n">p</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="n">dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">)</span>
        <span class="c1"># py2 considers isinstance(unicodestr, str) == False</span>
        <span class="k">if</span> <span class="n">PY2</span> <span class="ow">and</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">unicode</span><span class="p">):</span>
            <span class="n">p</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span><span class="n">p</span><span class="p">)</span>

    <span class="n">ndim</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">dim</span><span class="p">()</span>


    <span class="c1"># catch default case</span>
    <span class="k">if</span> <span class="n">dim</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">out</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">p</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
            <span class="k">if</span> <span class="n">p</span> <span class="o">==</span> <span class="s2">&quot;fro&quot;</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">frobenius_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">)</span>

    <span class="c1"># TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed</span>
    <span class="c1"># remove the overloads where dim is an int and replace with BroadcastingList1</span>
    <span class="c1"># and remove next four lines, replace _dim with dim</span>
    <span class="k">if</span> <span class="n">dim</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">dim</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">_dim</span> <span class="o">=</span> <span class="p">[</span><span class="n">dim</span><span class="p">]</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">_dim</span> <span class="o">=</span> <span class="n">dim</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="n">_dim</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">p</span> <span class="o">==</span> <span class="s2">&quot;fro&quot;</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;dtype argument is not supported in frobenius norm&quot;</span><span class="p">)</span>

            <span class="k">if</span> <span class="n">_dim</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="n">_dim</span> <span class="o">=</span> <span class="p">[</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">ndim</span><span class="p">)]</span>  <span class="c1"># noqa: C416 TODO: rewrite as list(range(m))</span>
            <span class="k">if</span> <span class="n">out</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">frobenius_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">frobenius_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">p</span> <span class="o">==</span> <span class="s2">&quot;nuc&quot;</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;dtype argument is not supported in nuclear norm&quot;</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">_dim</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">out</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">nuclear_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">)</span>
                <span class="k">else</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">nuclear_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">out</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">nuclear_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">)</span>
                <span class="k">else</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">nuclear_norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span>
        <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;only valid string values are &#39;fro&#39; and &#39;nuc&#39;, found </span><span class="si">{}</span><span class="s2">&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">p</span><span class="p">))</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">if</span> <span class="n">_dim</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">_dim</span> <span class="o">=</span> <span class="p">[</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">ndim</span><span class="p">)]</span>  <span class="c1"># noqa: C416 TODO: rewrite as list(range(m))</span>

        <span class="k">if</span> <span class="n">out</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span></div>

<div class="viewcode-block" id="chain_matmul"><a class="viewcode-back" href="../../torch.html#torch.chain_matmul">[docs]</a><span class="k">def</span> <span class="nf">chain_matmul</span><span class="p">(</span><span class="o">*</span><span class="n">matrices</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed</span>
<span class="sd">    using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms</span>
<span class="sd">    of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N`</span>
<span class="sd">    needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned.</span>
<span class="sd">    If :math:`N` is 1, then this is a no-op - the original matrix is returned as is.</span>


<span class="sd">    Args:</span>
<span class="sd">        matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined.</span>


<span class="sd">    Returns:</span>
<span class="sd">        Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product</span>
<span class="sd">        would be of dimensions :math:`p_{1} \times p_{N + 1}`.</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; a = torch.randn(3, 4)</span>
<span class="sd">        &gt;&gt;&gt; b = torch.randn(4, 5)</span>
<span class="sd">        &gt;&gt;&gt; c = torch.randn(5, 6)</span>
<span class="sd">        &gt;&gt;&gt; d = torch.randn(6, 7)</span>
<span class="sd">        &gt;&gt;&gt; torch.chain_matmul(a, b, c, d)</span>
<span class="sd">        tensor([[ -2.3375,  -3.9790,  -4.1119,  -6.6577,   9.5609, -11.5095,  -3.2614],</span>
<span class="sd">                [ 21.4038,   3.3378,  -8.4982,  -5.2457, -10.2561,  -2.4684,   2.7163],</span>
<span class="sd">                [ -0.9647,  -5.8917,  -2.3213,  -5.2284,  12.8615, -12.2816,  -2.5095]])</span>

<span class="sd">    .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">matrices</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">matrices</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">chain_matmul</span><span class="p">,</span> <span class="n">matrices</span><span class="p">,</span> <span class="o">*</span><span class="n">matrices</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">chain_matmul</span><span class="p">(</span><span class="n">matrices</span><span class="p">)</span></div>


<span class="k">def</span> <span class="nf">_lu_impl</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, bool, bool, Any) -&gt; Tuple[Tensor, Tensor, Tensor] </span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Computes the LU factorization of a matrix or batches of matrices</span>
<span class="sd">    :attr:`A`. Returns a tuple containing the LU factorization and</span>
<span class="sd">    pivots of :attr:`A`.  Pivoting is done if :attr:`pivot` is set to</span>
<span class="sd">    ``True``.</span>

<span class="sd">    .. note::</span>
<span class="sd">        The pivots returned by the function are 1-indexed. If :attr:`pivot` is ``False``,</span>
<span class="sd">        then the returned pivots is a tensor filled with zeros of the appropriate size.</span>

<span class="sd">    .. note::</span>
<span class="sd">        LU factorization with :attr:`pivot` = ``False`` is not available for CPU, and attempting</span>
<span class="sd">        to do so will throw an error. However, LU factorization with :attr:`pivot` = ``False`` is</span>
<span class="sd">        available for CUDA.</span>

<span class="sd">    .. note::</span>
<span class="sd">        This function does not check if the factorization was successful or not if</span>
<span class="sd">        :attr:`get_infos` is ``True`` since the status of the factorization is present in the</span>
<span class="sd">        third element of the return tuple.</span>

<span class="sd">    .. note::</span>
<span class="sd">        In the case of batches of square matrices with size less or</span>
<span class="sd">        equal to 32 on a CUDA device, the LU factorization is repeated</span>
<span class="sd">        for singular matrices due to the bug in the MAGMA library (see</span>
<span class="sd">        magma issue 13).</span>

<span class="sd">    Arguments:</span>
<span class="sd">        A (Tensor): the tensor to factor of size :math:`(*, m, n)`</span>
<span class="sd">        pivot (bool, optional): controls whether pivoting is done. Default: ``True``</span>
<span class="sd">        get_infos (bool, optional): if set to ``True``, returns an info IntTensor.</span>
<span class="sd">                                    Default: ``False``</span>
<span class="sd">        out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,</span>
<span class="sd">                               then the elements in the tuple are Tensor, IntTensor,</span>
<span class="sd">                               and IntTensor. If :attr:`get_infos` is ``False``, then the</span>
<span class="sd">                               elements in the tuple are Tensor, IntTensor. Default: ``None``</span>

<span class="sd">    Returns:</span>
<span class="sd">        (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing</span>

<span class="sd">            - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`</span>

<span class="sd">            - **pivots** (*IntTensor*): the pivots of size :math:`(*, m)`</span>

<span class="sd">            - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of</span>
<span class="sd">              size :math:`(*)` where non-zero values indicate whether factorization for the matrix or</span>
<span class="sd">              each minibatch has succeeded or failed</span>

<span class="sd">    Example::</span>

<span class="sd">        &gt;&gt;&gt; A = torch.randn(2, 3, 3)</span>
<span class="sd">        &gt;&gt;&gt; A_LU, pivots = torch.lu(A)</span>
<span class="sd">        &gt;&gt;&gt; A_LU</span>
<span class="sd">        tensor([[[ 1.3506,  2.5558, -0.0816],</span>
<span class="sd">                 [ 0.1684,  1.1551,  0.1940],</span>
<span class="sd">                 [ 0.1193,  0.6189, -0.5497]],</span>

<span class="sd">                [[ 0.4526,  1.2526, -0.3285],</span>
<span class="sd">                 [-0.7988,  0.7175, -0.9701],</span>
<span class="sd">                 [ 0.2634, -0.9255, -0.3459]]])</span>
<span class="sd">        &gt;&gt;&gt; pivots</span>
<span class="sd">        tensor([[ 3,  3,  3],</span>
<span class="sd">                [ 3,  3,  3]], dtype=torch.int32)</span>
<span class="sd">        &gt;&gt;&gt; A_LU, pivots, info = torch.lu(A, get_infos=True)</span>
<span class="sd">        &gt;&gt;&gt; if info.nonzero().size(0) == 0:</span>
<span class="sd">        ...   print(&#39;LU factorization succeeded for all samples!&#39;)</span>
<span class="sd">        LU factorization succeeded for all samples!</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="c1"># If get_infos is True, then we don&#39;t need to check for errors and vice versa</span>
    <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">_lu_with_info</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="n">pivot</span><span class="p">,</span> <span class="n">check_errors</span><span class="o">=</span><span class="p">(</span><span class="ow">not</span> <span class="n">get_infos</span><span class="p">))</span>

<!-- review: fixed a latent bug in the rendered source below. `out_len` is an int
     (see the `# type: (int, bool, List[Tensor])` annotation and the `len(out)`
     call sites), so the original `len(out_len)` inside the error message would
     itself raise `TypeError: object of type 'int' has no len()` instead of
     reporting the size mismatch. The count is now formatted directly. -->
<span class="k">def</span> <span class="nf">_check_list_size</span><span class="p">(</span><span class="n">out_len</span><span class="p">,</span> <span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="p">):</span>
    <span class="c1"># type: (int, bool, List[Tensor]) -&gt; None   </span>
    <span class="n">get_infos_int</span> <span class="o">=</span> <span class="mi">1</span> <span class="k">if</span> <span class="n">get_infos</span> <span class="k">else</span> <span class="mi">0</span>
    <span class="k">if</span> <span class="n">out_len</span> <span class="o">-</span> <span class="n">get_infos_int</span> <span class="o">!=</span> <span class="mi">2</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;expected tuple of </span><span class="si">{}</span><span class="s2"> elements but got </span><span class="si">{}</span><span class="s2">&quot;</span>
                        <span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="mi">2</span> <span class="o">+</span> <span class="nb">int</span><span class="p">(</span><span class="n">get_infos</span><span class="p">),</span> <span class="n">out_len</span><span class="p">))</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
        <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;argument &#39;out&#39; must be tuple of Tensors, not </span><span class="si">{}</span><span class="s2">&quot;</span>
                        <span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">out</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="p">))</span>

<!-- _lu_with_infos: the `get_infos=True` dispatch target of `lu` (wired up by
     boolean_dispatch below). Outside TorchScript it defers to __torch_function__
     overrides, then computes via _lu_impl; when `out` is given it validates its
     length with _check_list_size and copies all three results (A_LU, pivots,
     infos) into `out` in place (resize_as_ + copy_), otherwise it returns the
     3-tuple from _lu_impl directly. -->
<span class="k">def</span> <span class="nf">_lu_with_infos</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -&gt; Tuple[Tensor, Tensor, Tensor]</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">A</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="n">A</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">lu</span><span class="p">,</span> <span class="p">(</span><span class="n">A</span><span class="p">,),</span> <span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="n">pivot</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span>
    <span class="n">result</span> <span class="o">=</span> <span class="n">_lu_impl</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="p">,</span> <span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">out</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">_check_list_size</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">out</span><span class="p">),</span> <span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">out</span><span class="p">)):</span>
            <span class="n">out</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">resize_as_</span><span class="p">(</span><span class="n">result</span><span class="p">[</span><span class="n">i</span><span class="p">])</span><span class="o">.</span><span class="n">copy_</span><span class="p">(</span><span class="n">result</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
        <span class="k">return</span> <span class="n">out</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">result</span>  <span class="c1"># A_LU, pivots, infos</span>

<!-- _lu_no_infos: the `get_infos=False` dispatch target of `lu` (see
     boolean_dispatch below). Same flow as _lu_with_infos, but when `out` is not
     given it returns only the first two results (A_LU, pivots), discarding the
     infos tensor that _lu_impl also produces.
     NOTE(review): the inline "# need to check for torch_function here so that we
     exit if" comment on the next code line is truncated in the underlying Python
     source — the rest of the sentence is missing. -->
<span class="k">def</span> <span class="nf">_lu_no_infos</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="c1"># type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -&gt; Tuple[Tensor, Tensor] </span>
    <span class="c1"># need to check for torch_function here so that we exit if </span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">A</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="n">A</span><span class="p">,)):</span>
            <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span>
                <span class="n">lu</span><span class="p">,</span> <span class="p">(</span><span class="n">A</span><span class="p">,),</span> <span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="n">pivot</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span>
    <span class="n">result</span> <span class="o">=</span> <span class="n">_lu_impl</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="p">,</span> <span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">out</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">_check_list_size</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">out</span><span class="p">),</span> <span class="n">get_infos</span><span class="p">,</span> <span class="n">out</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">out</span><span class="p">)):</span>
            <span class="n">out</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">resize_as_</span><span class="p">(</span><span class="n">result</span><span class="p">[</span><span class="n">i</span><span class="p">])</span><span class="o">.</span><span class="n">copy_</span><span class="p">(</span><span class="n">result</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
        <span class="k">return</span> <span class="n">out</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">result</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">result</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>  <span class="c1"># A_LU, pivots</span>

<!-- lu: public entry point, built as a TorchScript-friendly boolean dispatch on
     the `get_infos` keyword (arg_index=2 is its positional slot). Calls with
     get_infos=True go to _lu_with_infos, everything else (the default) to
     _lu_no_infos; the docstring is taken from _lu_impl. -->
<span class="c1"># The return type of lu depends on `get_infos`, so in order to resolve the output type</span>
<span class="c1"># of lu in TorchScript we need to statically know the value of `get_infos` </span>
<span class="n">lu</span> <span class="o">=</span> <span class="n">boolean_dispatch</span><span class="p">(</span>
    <span class="n">arg_name</span><span class="o">=</span><span class="s1">&#39;get_infos&#39;</span><span class="p">,</span>
    <span class="n">arg_index</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span>
    <span class="n">default</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
    <span class="n">if_true</span><span class="o">=</span><span class="n">_lu_with_infos</span><span class="p">,</span>
    <span class="n">if_false</span><span class="o">=</span><span class="n">_lu_no_infos</span><span class="p">,</span>
    <span class="n">module_name</span><span class="o">=</span><span class="vm">__name__</span><span class="p">,</span>
    <span class="n">func_name</span><span class="o">=</span><span class="s1">&#39;lu&#39;</span><span class="p">)</span>
<span class="n">lu</span><span class="o">.</span><span class="vm">__doc__</span> <span class="o">=</span> <span class="n">_lu_impl</span><span class="o">.</span><span class="vm">__doc__</span>

<!-- align_tensors: unimplemented placeholder in this release; any call raises
     RuntimeError unconditionally. -->
<span class="k">def</span> <span class="nf">align_tensors</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
    <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s1">&#39;`align_tensors` not yet implemented.&#39;</span><span class="p">)</span>
</pre></div>

             </article>
             
            </div>
            <footer>
  

  

    <hr>

  

  <div role="contentinfo">
    <p>
        &copy; Copyright 2019, Torch Contributors.

    </p>
  </div>
    
      <div>
        Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>
     

</footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              
            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <script type="text/javascript" id="documentation_options" data-url_root="../../" src="../../_static/documentation_options.js"></script>
         <script src="../../_static/jquery.js"></script>
         <script src="../../_static/underscore.js"></script>
         <script src="../../_static/doctools.js"></script>
         <script src="../../_static/language_data.js"></script>
     

  

  <script type="text/javascript" src="../../_static/js/vendor/popper.min.js"></script>
  <script type="text/javascript" src="../../_static/js/vendor/bootstrap.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script type="text/javascript" src="../../_static/js/theme.js"></script>

  <script type="text/javascript">
      // Enable the Read the Docs theme's navigation behaviour once the DOM is
      // ready; SphinxRtdTheme is defined by theme.js, loaded above.
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 
<script>
  // Standard Google Analytics (analytics.js) bootstrap snippet: stubs window.ga
  // as a call queue and asynchronously injects the analytics.js loader before
  // the first <script> element on the page. Left in its canonical minified form.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Register the property and record the pageview.
  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<script>
  // Google tag (gtag.js) configuration; the library itself is fetched by the
  // async googletagmanager <script> tag above and drains window.dataLayer.
  window.dataLayer = window.dataLayer || [];

  function gtag(){dataLayer.push(arguments);}

  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <a href="https://pytorch.org/" class="footer-logo"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <!-- rel="noopener noreferrer" on target="_blank" links prevents the
               opened page from reaching back via window.opener (reverse tabnabbing). -->
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <div id="mc_embed_signup">
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>    <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->

                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <!-- value="" left this image-styled submit with no accessible
                           name; aria-label provides one for screen readers. -->
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button" aria-label="Subscribe">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <div class="footer-social-icons">
            <!-- Icon-only links (CSS background images) had no accessible name;
                 aria-label supplies one. rel="noopener noreferrer" hardens
                 target="_blank" against reverse tabnabbing. -->
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <!-- NOTE(review): this image appears to serve as the banner's dismiss control
         (class "close-button"); ideally it would be a <button> — presumably theme
         JS binds the click, so only the missing required alt text is added here. -->
    <img class="close-button" src="../../_static/images/pytorch-x.svg" alt="Close">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <!-- NOTE(review): ideally a <button> rather than href="#", but the theme
               JS targets this anchor via data-behavior; at minimum it now has an
               accessible name like the logo link above. -->
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu" aria-label="Close menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <script type="text/javascript" src="../../_static/js/vendor/anchor.min.js"></script>

  <script type="text/javascript">
    // Wire up the PyTorch docs theme's interactive behaviours once the DOM is
    // ready; all *.bind() helpers are defined in theme.js, loaded earlier.
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Remove any empty p tags that Sphinx adds
      $("[data-tags='null']").remove();

      // Add class to links that have code blocks, since we cannot create links in code blocks
      $("article.pytorch-article a span.pre").each(function(e) {
        $(this).closest("a").addClass("has-code");
      });
    })
  </script>
</body>
</html>