


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>torch.autograd.gradcheck &mdash; PyTorch master documentation</title>

  <link rel="canonical" href="https://pytorch.org/docs/stable/_modules/torch/autograd/gradcheck.html"/>

  <link rel="stylesheet" href="../../../_static/css/theme.css" type="text/css" />
  <!-- <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" /> -->
  <link rel="stylesheet" href="../../../_static/css/jit.css" type="text/css" />
  <!-- KaTeX stylesheet. The page previously linked BOTH katex@0.10.0-beta and
       katex@0.11.1, downloading two copies of the same framework CSS; only the
       newer build is kept. The font preloads below use the same 0.11.1 URLs so
       the preloaded files are the ones this stylesheet actually requests
       (a version mismatch would make every font preload a wasted fetch). -->
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css" type="text/css" />
  <link rel="stylesheet" href="../../../_static/katex-math.css" type="text/css" />
  <link rel="index" title="Index" href="../../../genindex.html" />
  <link rel="search" title="Search" href="../../../search.html" />

  <script src="../../../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts (crossorigin is required on font preloads so the
       preload request mode matches the CSS @font-face fetch) -->

<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

<!-- Preload the KaTeX fonts (same version as the stylesheet above) -->

<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<!-- <body> must open before any page content: this header previously sat in
     the invalid no-man's-land between </head> and <body>, where browsers have
     to reparent it. Also fixed: doubled quotes on two hrefs and unquoted
     class=dropdown-title attribute values. -->
<body class="pytorch-body">

<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <div class="ecosystem-dropdown">
              <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                Ecosystem
              </a>
              <div class="ecosystem-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                  <span class="dropdown-title">Models (Beta)</span>
                  <p>Discover, publish, and reuse pre-trained models</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools &amp; Libraries</span>
                  <p>Explore the ecosystem of tools and libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <div class="resources-dropdown">
              <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                Resources
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/features">
                  <span class="dropdown-title">About</span>
                  <p>Learn about PyTorch’s features and capabilities</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>

      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
    </div>

  </div>
</div>

    

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">
            

            
              
              
                <div class="version">
                  master (1.5.0)
                </div>
              
            

            


  


<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search Docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

            
          </div>

          
<div>
  <a style="color:#F05732" href="https://pytorch.org/docs/stable/_modules/torch/autograd/gradcheck.html">
    You are viewing unstable developer preview docs.
    Click here to view docs for the latest stable release.
  </a>
</div>

            
            
              
            
            
              <p class="caption"><span class="caption-text">Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/cuda.html">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/windows.html">Windows FAQ</a></li>
</ul>
<p class="caption"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../packages.html">Javadoc</a></li>
</ul>
<p class="caption"><span class="caption-text">Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../amp.html">torch.cuda.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../optim.html">torch.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../rpc/index.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../__config__.html">torch.__config__</a></li>
</ul>
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<!-- was http:// — every sibling link (and the site itself) is served over HTTPS -->
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

            
          

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="pytorch-breadcrumbs">
    
      <li>
        <a href="../../../index.html">
          
            Docs
          
        </a> &gt;
      </li>

        
          <li><a href="../../index.html">Module code</a> &gt;</li>
        
          <li><a href="../../torch.html">torch</a> &gt;</li>
        
          <li><a href="../autograd.html">torch.autograd</a> &gt;</li>
        
      <li>torch.autograd.gradcheck</li>
    
    
      <li class="pytorch-breadcrumbs-aside">
        
      </li>
    
  </ul>

  
</div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <h1>Source code for torch.autograd.gradcheck</h1><div class="highlight"><pre>
<span></span><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">from</span> <span class="nn">torch._six</span> <span class="kn">import</span> <span class="n">container_abcs</span><span class="p">,</span> <span class="n">istuple</span>
<span class="kn">import</span> <span class="nn">torch.testing</span>
<span class="kn">from</span> <span class="nn">itertools</span> <span class="kn">import</span> <span class="n">product</span>
<span class="kn">import</span> <span class="nn">warnings</span>

<span class="k">def</span> <span class="nf">zero_gradients</span><span class="p">(</span><span class="n">x</span><span class="p">):</span>
    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">x</span><span class="o">.</span><span class="n">grad</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">x</span><span class="o">.</span><span class="n">grad</span><span class="o">.</span><span class="n">detach_</span><span class="p">()</span>
            <span class="n">x</span><span class="o">.</span><span class="n">grad</span><span class="o">.</span><span class="n">zero_</span><span class="p">()</span>
    <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">container_abcs</span><span class="o">.</span><span class="n">Iterable</span><span class="p">):</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="n">zero_gradients</span><span class="p">(</span><span class="n">elem</span><span class="p">)</span>


<span class="k">def</span> <span class="nf">make_jacobian</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">num_out</span><span class="p">):</span>
    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">input</span><span class="o">.</span><span class="n">is_floating_point</span><span class="p">():</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">input</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="nb">input</span><span class="o">.</span><span class="n">nelement</span><span class="p">(),</span> <span class="n">num_out</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="nb">input</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
    <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">container_abcs</span><span class="o">.</span><span class="n">Iterable</span><span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
        <span class="n">jacobians</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span>
            <span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">,</span> <span class="p">(</span><span class="n">make_jacobian</span><span class="p">(</span><span class="n">elem</span><span class="p">,</span> <span class="n">num_out</span><span class="p">)</span> <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="nb">input</span><span class="p">)))</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">jacobians</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="k">return</span> <span class="nb">type</span><span class="p">(</span><span class="nb">input</span><span class="p">)(</span><span class="n">jacobians</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">return</span> <span class="kc">None</span>


<span class="k">def</span> <span class="nf">iter_tensors</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">only_requiring_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">x</span><span class="o">.</span><span class="n">requires_grad</span> <span class="ow">or</span> <span class="ow">not</span> <span class="n">only_requiring_grad</span><span class="p">:</span>
            <span class="k">yield</span> <span class="n">x</span>
    <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">container_abcs</span><span class="o">.</span><span class="n">Iterable</span><span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="k">for</span> <span class="n">result</span> <span class="ow">in</span> <span class="n">iter_tensors</span><span class="p">(</span><span class="n">elem</span><span class="p">,</span> <span class="n">only_requiring_grad</span><span class="p">):</span>
                <span class="k">yield</span> <span class="n">result</span>


<span class="k">def</span> <span class="nf">get_numerical_jacobian</span><span class="p">(</span><span class="n">fn</span><span class="p">,</span> <span class="nb">input</span><span class="p">,</span> <span class="n">target</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="mf">1e-3</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    input: input to `fn`</span>
<span class="sd">    target: the Tensors wrt whom Jacobians are calculated (default=`input`)</span>

<span class="sd">    Note that `target` may not even be part of `input` to `fn`, so please be</span>
<span class="sd">    **very careful** in this to not clone `target`.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">target</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">target</span> <span class="o">=</span> <span class="nb">input</span>
    <span class="n">output_size</span> <span class="o">=</span> <span class="n">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span>
    <span class="n">jacobian</span> <span class="o">=</span> <span class="n">make_jacobian</span><span class="p">(</span><span class="n">target</span><span class="p">,</span> <span class="n">output_size</span><span class="p">)</span>

    <span class="c1"># It&#39;s much easier to iterate over flattened lists of tensors.</span>
    <span class="c1"># These are reference to the same objects in jacobian, so any changes</span>
    <span class="c1"># will be reflected in it as well.</span>
    <span class="n">x_tensors</span> <span class="o">=</span> <span class="n">iter_tensors</span><span class="p">(</span><span class="n">target</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
    <span class="n">j_tensors</span> <span class="o">=</span> <span class="n">iter_tensors</span><span class="p">(</span><span class="n">jacobian</span><span class="p">)</span>

    <span class="c1"># TODO: compare structure</span>
    <span class="k">for</span> <span class="n">x_tensor</span><span class="p">,</span> <span class="n">d_tensor</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">x_tensors</span><span class="p">,</span> <span class="n">j_tensors</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
            <span class="k">def</span> <span class="nf">get_stride</span><span class="p">(</span><span class="n">size</span><span class="p">):</span>
                <span class="n">dim</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">size</span><span class="p">)</span>
                <span class="n">tmp</span> <span class="o">=</span> <span class="mi">1</span>
                <span class="n">stride</span> <span class="o">=</span> <span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">*</span> <span class="n">dim</span>
                <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">reversed</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="n">dim</span><span class="p">)):</span>
                    <span class="n">stride</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">tmp</span>
                    <span class="n">tmp</span> <span class="o">*=</span> <span class="n">size</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                <span class="k">return</span> <span class="n">stride</span>

            <span class="n">x_nnz</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">_nnz</span><span class="p">()</span>
            <span class="n">x_size</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x_tensor</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
            <span class="n">x_indices</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">_indices</span><span class="p">()</span><span class="o">.</span><span class="n">t</span><span class="p">()</span>
            <span class="n">x_values</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">_values</span><span class="p">()</span>
            <span class="n">x_stride</span> <span class="o">=</span> <span class="n">get_stride</span><span class="p">(</span><span class="n">x_size</span><span class="p">)</span>

            <span class="c1"># Use .data here to get around the version check</span>
            <span class="n">x_values</span> <span class="o">=</span> <span class="n">x_values</span><span class="o">.</span><span class="n">data</span>

            <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">x_nnz</span><span class="p">):</span>
                <span class="n">x_value</span> <span class="o">=</span> <span class="n">x_values</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                <span class="k">for</span> <span class="n">x_idx</span> <span class="ow">in</span> <span class="n">product</span><span class="p">(</span><span class="o">*</span><span class="p">[</span><span class="nb">range</span><span class="p">(</span><span class="n">m</span><span class="p">)</span> <span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">x_values</span><span class="o">.</span><span class="n">size</span><span class="p">()[</span><span class="mi">1</span><span class="p">:]]):</span>
                    <span class="n">indices</span> <span class="o">=</span> <span class="n">x_indices</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">tolist</span><span class="p">()</span> <span class="o">+</span> <span class="nb">list</span><span class="p">(</span><span class="n">x_idx</span><span class="p">)</span>
                    <span class="n">d_idx</span> <span class="o">=</span> <span class="nb">sum</span><span class="p">(</span><span class="n">indices</span><span class="p">[</span><span class="n">k</span><span class="p">]</span> <span class="o">*</span> <span class="n">x_stride</span><span class="p">[</span><span class="n">k</span><span class="p">]</span> <span class="k">for</span> <span class="n">k</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_size</span><span class="p">)))</span>
                    <span class="n">orig</span> <span class="o">=</span> <span class="n">x_value</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>
                    <span class="n">x_value</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span> <span class="o">-</span> <span class="n">eps</span>
                    <span class="n">outa</span> <span class="o">=</span> <span class="n">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span><span class="o">.</span><span class="n">clone</span><span class="p">()</span>
                    <span class="n">x_value</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span> <span class="o">+</span> <span class="n">eps</span>
                    <span class="n">outb</span> <span class="o">=</span> <span class="n">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span><span class="o">.</span><span class="n">clone</span><span class="p">()</span>
                    <span class="n">x_value</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span>
                    <span class="n">r</span> <span class="o">=</span> <span class="p">(</span><span class="n">outb</span> <span class="o">-</span> <span class="n">outa</span><span class="p">)</span> <span class="o">/</span> <span class="p">(</span><span class="mi">2</span> <span class="o">*</span> <span class="n">eps</span><span class="p">)</span>
                    <span class="n">d_tensor</span><span class="p">[</span><span class="n">d_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">r</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">layout</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">_mkldnn</span><span class="p">:</span>
            <span class="c1"># Use .data here to get around the version check</span>
            <span class="n">x_tensor</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">data</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">1</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;gradcheck currently only supports functions with 1 input, but got: &#39;</span><span class="p">,</span>
                                 <span class="nb">len</span><span class="p">(</span><span class="nb">input</span><span class="p">))</span>
            <span class="k">for</span> <span class="n">d_idx</span><span class="p">,</span> <span class="n">x_idx</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">product</span><span class="p">(</span><span class="o">*</span><span class="p">[</span><span class="nb">range</span><span class="p">(</span><span class="n">m</span><span class="p">)</span> <span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">size</span><span class="p">()])):</span>
                <span class="c1"># this is really inefficient, but without indexing implemented, there&#39;s</span>
                <span class="c1"># not really a better way than converting back and forth</span>
                <span class="n">x_tensor_dense</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
                <span class="n">orig</span> <span class="o">=</span> <span class="n">x_tensor_dense</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>

                <span class="n">x_tensor_dense</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span> <span class="o">-</span> <span class="n">eps</span>
                <span class="n">x_tensor_mkl</span> <span class="o">=</span> <span class="n">x_tensor_dense</span><span class="o">.</span><span class="n">to_mkldnn</span><span class="p">()</span>
                <span class="n">outa</span> <span class="o">=</span> <span class="n">fn</span><span class="p">([</span><span class="n">x_tensor_mkl</span><span class="p">])</span>

                <span class="n">x_tensor_dense</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span> <span class="o">+</span> <span class="n">eps</span>
                <span class="n">x_tensor_mkl</span> <span class="o">=</span> <span class="n">x_tensor_dense</span><span class="o">.</span><span class="n">to_mkldnn</span><span class="p">()</span>
                <span class="n">outb</span> <span class="o">=</span> <span class="n">fn</span><span class="p">([</span><span class="n">x_tensor_mkl</span><span class="p">])</span>

                <span class="n">r</span> <span class="o">=</span> <span class="p">(</span><span class="n">outb</span> <span class="o">-</span> <span class="n">outa</span><span class="p">)</span> <span class="o">/</span> <span class="p">(</span><span class="mi">2</span> <span class="o">*</span> <span class="n">eps</span><span class="p">)</span>
                <span class="n">d_tensor</span><span class="p">[</span><span class="n">d_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">r</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="c1"># Use .data here to get around the version check</span>
            <span class="n">x_tensor</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">data</span>
            <span class="k">for</span> <span class="n">d_idx</span><span class="p">,</span> <span class="n">x_idx</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">product</span><span class="p">(</span><span class="o">*</span><span class="p">[</span><span class="nb">range</span><span class="p">(</span><span class="n">m</span><span class="p">)</span> <span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">x_tensor</span><span class="o">.</span><span class="n">size</span><span class="p">()])):</span>
                <span class="n">orig</span> <span class="o">=</span> <span class="n">x_tensor</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>
                <span class="n">x_tensor</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span> <span class="o">-</span> <span class="n">eps</span>
                <span class="n">outa</span> <span class="o">=</span> <span class="n">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span><span class="o">.</span><span class="n">clone</span><span class="p">()</span>
                <span class="n">x_tensor</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span> <span class="o">+</span> <span class="n">eps</span>
                <span class="n">outb</span> <span class="o">=</span> <span class="n">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span><span class="o">.</span><span class="n">clone</span><span class="p">()</span>
                <span class="n">x_tensor</span><span class="p">[</span><span class="n">x_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">orig</span>
                <span class="n">r</span> <span class="o">=</span> <span class="p">(</span><span class="n">outb</span> <span class="o">-</span> <span class="n">outa</span><span class="p">)</span> <span class="o">/</span> <span class="p">(</span><span class="mi">2</span> <span class="o">*</span> <span class="n">eps</span><span class="p">)</span>
                <span class="n">d_tensor</span><span class="p">[</span><span class="n">d_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">r</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>

    <span class="k">return</span> <span class="n">jacobian</span>


<span class="k">def</span> <span class="nf">get_analytical_jacobian</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">output</span><span class="p">,</span> <span class="n">nondet_tol</span><span class="o">=</span><span class="mf">0.0</span><span class="p">):</span><!-- NOTE(review): builds the analytical Jacobian d(output)/d(input) column-by-column via torch.autograd.grad with one-hot grad_outputs; returns (jacobian, reentrant, correct_grad_sizes). Comment invisible in the rendered <pre>; the fix for any defect belongs in the upstream gradcheck.py, not this generated page. -->
    <span class="c1"># it is easier to call to_dense() on the sparse output than</span>
    <span class="c1"># to modify analytical jacobian</span>
    <span class="k">if</span> <span class="n">output</span><span class="o">.</span><span class="n">is_sparse</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;Sparse output is not supported at gradcheck yet. &#39;</span>
                         <span class="s1">&#39;Please call to_dense() on the output of fn for gradcheck.&#39;</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">output</span><span class="o">.</span><span class="n">layout</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">_mkldnn</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;MKLDNN output is not supported at gradcheck yet. &#39;</span>
                         <span class="s1">&#39;Please call to_dense() on the output of fn for gradcheck.&#39;</span><span class="p">)</span>
    <span class="n">diff_input_list</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">iter_tensors</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="kc">True</span><span class="p">))</span>
    <span class="n">jacobian</span> <span class="o">=</span> <span class="n">make_jacobian</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">output</span><span class="o">.</span><span class="n">numel</span><span class="p">())</span>
    <span class="n">jacobian_reentrant</span> <span class="o">=</span> <span class="n">make_jacobian</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">output</span><span class="o">.</span><span class="n">numel</span><span class="p">())</span><!-- second Jacobian: backward is run twice per column so the two results can be compared for determinism (the "reentrant" flag below) -->
    <span class="n">grad_output</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros_like</span><span class="p">(</span><span class="n">output</span><span class="p">,</span> <span class="n">memory_format</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">legacy_contiguous_format</span><span class="p">)</span>
    <span class="n">flat_grad_output</span> <span class="o">=</span> <span class="n">grad_output</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>
    <span class="n">reentrant</span> <span class="o">=</span> <span class="kc">True</span>
    <span class="n">correct_grad_sizes</span> <span class="o">=</span> <span class="kc">True</span>

    <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">flat_grad_output</span><span class="o">.</span><span class="n">numel</span><span class="p">()):</span>
        <span class="n">flat_grad_output</span><span class="o">.</span><span class="n">zero_</span><span class="p">()</span>
        <span class="n">flat_grad_output</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span><!-- one-hot grad_output selects row i of the output; the resulting grads fill column i of each per-input Jacobian -->
        <span class="k">for</span> <span class="n">jacobian_c</span> <span class="ow">in</span> <span class="p">(</span><span class="n">jacobian</span><span class="p">,</span> <span class="n">jacobian_reentrant</span><span class="p">):</span>
            <span class="n">grads_input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">grad</span><span class="p">(</span><span class="n">output</span><span class="p">,</span> <span class="n">diff_input_list</span><span class="p">,</span> <span class="n">grad_output</span><span class="p">,</span>
                                              <span class="n">retain_graph</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">allow_unused</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
            <span class="k">for</span> <span class="n">jacobian_x</span><span class="p">,</span> <span class="n">d_x</span><span class="p">,</span> <span class="n">x</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">jacobian_c</span><span class="p">,</span> <span class="n">grads_input</span><span class="p">,</span> <span class="n">diff_input_list</span><span class="p">):</span>
                <span class="k">if</span> <span class="n">d_x</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">d_x</span><span class="o">.</span><span class="n">size</span><span class="p">()</span> <span class="o">!=</span> <span class="n">x</span><span class="o">.</span><span class="n">size</span><span class="p">():</span>
                    <span class="n">correct_grad_sizes</span> <span class="o">=</span> <span class="kc">False</span>
                <span class="k">elif</span> <span class="n">jacobian_x</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
                    <span class="k">if</span> <span class="n">d_x</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span><!-- allow_unused=True returned None: this input does not influence output element i, so its Jacobian column is zero -->
                        <span class="n">jacobian_x</span><span class="p">[:,</span> <span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">zero_</span><span class="p">()</span>
                    <span class="k">else</span><span class="p">:</span>
                        <span class="n">d_x_dense</span> <span class="o">=</span> <span class="n">d_x</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span> <span class="k">if</span> <span class="ow">not</span> <span class="n">d_x</span><span class="o">.</span><span class="n">layout</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">strided</span> <span class="k">else</span> <span class="n">d_x</span>
                        <span class="k">assert</span> <span class="n">jacobian_x</span><span class="p">[:,</span> <span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">==</span> <span class="n">d_x_dense</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span>
                        <span class="n">jacobian_x</span><span class="p">[:,</span> <span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">d_x_dense</span><span class="o">.</span><span class="n">contiguous</span><span class="p">()</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>

    <span class="k">for</span> <span class="n">jacobian_x</span><span class="p">,</span> <span class="n">jacobian_reentrant_x</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">jacobian</span><span class="p">,</span> <span class="n">jacobian_reentrant</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">jacobian_x</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">!=</span> <span class="mi">0</span> <span class="ow">and</span> <span class="p">(</span><span class="n">jacobian_x</span> <span class="o">-</span> <span class="n">jacobian_reentrant_x</span><span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">max</span><span class="p">()</span> <span class="o">&gt;</span> <span class="n">nondet_tol</span><span class="p">:</span><!-- two identical backward passes must agree to within nondet_tol, otherwise the backward is flagged as non-reentrant -->
            <span class="n">reentrant</span> <span class="o">=</span> <span class="kc">False</span>

    <span class="k">return</span> <span class="n">jacobian</span><span class="p">,</span> <span class="n">reentrant</span><span class="p">,</span> <span class="n">correct_grad_sizes</span>


<span class="k">def</span> <span class="nf">_as_tuple</span><span class="p">(</span><span class="n">x</span><span class="p">):</span><!-- normalizes x to a tuple: tuples pass through, lists are converted, any other value becomes a 1-tuple (x,) -->
    <span class="k">if</span> <span class="n">istuple</span><span class="p">(</span><span class="n">x</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">x</span>
    <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">list</span><span class="p">):</span>
        <span class="k">return</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">x</span><span class="p">,</span>


<span class="k">def</span> <span class="nf">_differentiable_outputs</span><span class="p">(</span><span class="n">x</span><span class="p">):</span><!-- filters the (tuple-normalized) outputs down to those with requires_grad=True, i.e. the ones gradcheck can differentiate -->
    <span class="k">return</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">o</span> <span class="k">for</span> <span class="n">o</span> <span class="ow">in</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">if</span> <span class="n">o</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>


<div class="viewcode-block" id="gradcheck"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.gradcheck">[docs]</a><span class="k">def</span> <span class="nf">gradcheck</span><span class="p">(</span><span class="n">func</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="mf">1e-6</span><span class="p">,</span> <span class="n">atol</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span> <span class="n">rtol</span><span class="o">=</span><span class="mf">1e-3</span><span class="p">,</span> <span class="n">raise_exception</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">check_sparse_nnz</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">nondet_tol</span><span class="o">=</span><span class="mf">0.0</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Check gradients computed via small finite differences against analytical</span>
<span class="sd">    gradients w.r.t. tensors in :attr:`inputs` that are of floating point type</span>
<span class="sd">    and with ``requires_grad=True``.</span>

<span class="sd">    The check between numerical and analytical gradients uses :func:`~torch.allclose`.</span>

<span class="sd">    .. note::</span>
<span class="sd">        The default values are designed for :attr:`input` of double precision.</span>
<span class="sd">        This check will likely fail if :attr:`input` is of less precision, e.g.,</span>
<span class="sd">        ``FloatTensor``.</span>

<span class="sd">    .. warning::</span>
<span class="sd">       If any checked tensor in :attr:`input` has overlapping memory, i.e.,</span>
<span class="sd">       different indices pointing to the same memory address (e.g., from</span>
<span class="sd">       :func:`torch.expand`), this check will likely fail because the numerical</span>
<span class="sd">       gradients computed by point perturbation at such indices will change</span>
<span class="sd">       values at all other indices that share the same memory address.</span>

<span class="sd">    Args:</span>
<span class="sd">        func (function): a Python function that takes Tensor inputs and returns</span>
<span class="sd">            a Tensor or a tuple of Tensors</span>
<span class="sd">        inputs (tuple of Tensor or Tensor): inputs to the function</span>
<span class="sd">        eps (float, optional): perturbation for finite differences</span>
<span class="sd">        atol (float, optional): absolute tolerance</span>
<span class="sd">        rtol (float, optional): relative tolerance</span>
<span class="sd">        raise_exception (bool, optional): indicating whether to raise an exception if</span>
<span class="sd">            the check fails. The exception gives more information about the</span>
<span class="sd">            exact nature of the failure. This is helpful when debugging gradchecks.</span>
<span class="sd">        check_sparse_nnz (bool, optional): if True, gradcheck allows for SparseTensor input,</span>
<span class="sd">            and for any SparseTensor at input, gradcheck will perform check at nnz positions only.</span>
<span class="sd">        nondet_tol (float, optional): tolerance for non-determinism. When running</span>
<span class="sd">            identical inputs through the differentiation, the results must either match</span>
<span class="sd">            exactly (default, 0.0) or be within this tolerance.</span>

<span class="sd">    Returns:</span>
<span class="sd">        True if all differences satisfy allclose condition</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="nf">fail_test</span><span class="p">(</span><span class="n">msg</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">raise_exception</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="n">msg</span><span class="p">)</span>
        <span class="k">return</span> <span class="kc">False</span>

    <span class="n">tupled_inputs</span> <span class="o">=</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>
    <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="n">t</span><span class="o">.</span><span class="n">is_sparse</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tupled_inputs</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">t</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">))</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">check_sparse_nnz</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.&#39;</span><span class="p">)</span>

    <span class="c1"># Make sure that gradients are saved for all inputs</span>
    <span class="n">any_input_requiring_grad</span> <span class="o">=</span> <span class="kc">False</span>
    <span class="n">some_input_not_requiring_grad</span> <span class="o">=</span> <span class="kc">False</span>
    <span class="k">for</span> <span class="n">inp</span> <span class="ow">in</span> <span class="n">tupled_inputs</span><span class="p">:</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">inp</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">):</span>
            <span class="k">if</span> <span class="n">inp</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">inp</span><span class="o">.</span><span class="n">dtype</span> <span class="o">!=</span> <span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">:</span>
                    <span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span>
                        <span class="s1">&#39;At least one of the inputs that requires gradient &#39;</span>
                        <span class="s1">&#39;is not of double precision floating point. &#39;</span>
                        <span class="s1">&#39;This check will likely fail if all the inputs are &#39;</span>
                        <span class="s1">&#39;not of double precision floating point. &#39;</span><span class="p">)</span>
                <span class="n">any_input_requiring_grad</span> <span class="o">=</span> <span class="kc">True</span>
                <span class="n">inp</span><span class="o">.</span><span class="n">retain_grad</span><span class="p">()</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">some_input_not_requiring_grad</span> <span class="o">=</span> <span class="kc">True</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">any_input_requiring_grad</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
            <span class="s1">&#39;gradcheck expects at least one input tensor to require gradient, &#39;</span>
            <span class="s1">&#39;but none of the them have requires_grad=True.&#39;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">some_input_not_requiring_grad</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
                <span class="s1">&#39;gradcheck expects if at least one input tensor is required gradient, &#39;</span>
                <span class="s1">&#39;then all other inputs should have requires_grad=True.&#39;</span><span class="p">)</span>

    <span class="n">func_out</span> <span class="o">=</span> <span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="n">tupled_inputs</span><span class="p">)</span>
    <span class="n">output</span> <span class="o">=</span> <span class="n">_differentiable_outputs</span><span class="p">(</span><span class="n">func_out</span><span class="p">)</span>

    <span class="k">if</span> <span class="ow">not</span> <span class="n">output</span><span class="p">:</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">o</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">func_out</span><span class="p">):</span>
            <span class="k">def</span> <span class="nf">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">):</span>
                <span class="k">return</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="nb">input</span><span class="p">))[</span><span class="n">i</span><span class="p">]</span>
            <span class="n">numerical</span> <span class="o">=</span> <span class="n">get_numerical_jacobian</span><span class="p">(</span><span class="n">fn</span><span class="p">,</span> <span class="n">tupled_inputs</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="n">eps</span><span class="p">)</span>
            <span class="k">for</span> <span class="n">n</span> <span class="ow">in</span> <span class="n">numerical</span><span class="p">:</span>
                <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">nonzero</span><span class="p">(</span><span class="n">n</span><span class="p">))</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;Numerical gradient for function expected to be zero&#39;</span><span class="p">)</span>
        <span class="k">return</span> <span class="kc">True</span>

    <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">o</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">output</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">o</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">:</span>
            <span class="k">continue</span>

        <span class="k">def</span> <span class="nf">fn</span><span class="p">(</span><span class="nb">input</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="nb">input</span><span class="p">))[</span><span class="n">i</span><span class="p">]</span>

        <span class="n">analytical</span><span class="p">,</span> <span class="n">reentrant</span><span class="p">,</span> <span class="n">correct_grad_sizes</span> <span class="o">=</span> <span class="n">get_analytical_jacobian</span><span class="p">(</span><span class="n">tupled_inputs</span><span class="p">,</span> <span class="n">o</span><span class="p">,</span> <span class="n">nondet_tol</span><span class="o">=</span><span class="n">nondet_tol</span><span class="p">)</span>
        <span class="n">numerical</span> <span class="o">=</span> <span class="n">get_numerical_jacobian</span><span class="p">(</span><span class="n">fn</span><span class="p">,</span> <span class="n">tupled_inputs</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="n">eps</span><span class="p">)</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="n">correct_grad_sizes</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;Analytical gradient has incorrect size&#39;</span><span class="p">)</span>

        <span class="k">for</span> <span class="n">j</span><span class="p">,</span> <span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">n</span><span class="p">)</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">analytical</span><span class="p">,</span> <span class="n">numerical</span><span class="p">)):</span>
            <span class="k">if</span> <span class="n">a</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">!=</span> <span class="mi">0</span> <span class="ow">or</span> <span class="n">n</span><span class="o">.</span><span class="n">numel</span><span class="p">()</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
                <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">allclose</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">rtol</span><span class="p">,</span> <span class="n">atol</span><span class="p">):</span>
                    <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;Jacobian mismatch for output </span><span class="si">%d</span><span class="s1"> with respect to input </span><span class="si">%d</span><span class="s1">,</span><span class="se">\n</span><span class="s1">&#39;</span>
                                     <span class="s1">&#39;numerical:</span><span class="si">%s</span><span class="se">\n</span><span class="s1">analytical:</span><span class="si">%s</span><span class="se">\n</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">j</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">a</span><span class="p">))</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="n">reentrant</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;Backward is not reentrant, i.e., running backward with same &#39;</span>
                             <span class="s1">&#39;input and grad_output multiple times gives different values, &#39;</span>
                             <span class="s1">&#39;although analytical gradient matches numerical gradient. &#39;</span>
                             <span class="s1">&#39;The tolerance for nondeterminism was </span><span class="si">{}</span><span class="s1">.&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">nondet_tol</span><span class="p">))</span>

    <span class="c1"># check if the backward multiplies by grad_output</span>
    <span class="n">output</span> <span class="o">=</span> <span class="n">_differentiable_outputs</span><span class="p">(</span><span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="n">tupled_inputs</span><span class="p">))</span>
    <span class="k">if</span> <span class="nb">any</span><span class="p">([</span><span class="n">o</span><span class="o">.</span><span class="n">requires_grad</span> <span class="k">for</span> <span class="n">o</span> <span class="ow">in</span> <span class="n">output</span><span class="p">]):</span>
        <span class="n">diff_input_list</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">iter_tensors</span><span class="p">(</span><span class="n">tupled_inputs</span><span class="p">,</span> <span class="kc">True</span><span class="p">))</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">diff_input_list</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;no Tensors requiring grad found in input&quot;</span><span class="p">)</span>
        <span class="n">grads_input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">grad</span><span class="p">(</span><span class="n">output</span><span class="p">,</span> <span class="n">diff_input_list</span><span class="p">,</span>
                                          <span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">zeros_like</span><span class="p">(</span><span class="n">o</span><span class="p">,</span> <span class="n">memory_format</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">legacy_contiguous_format</span><span class="p">)</span> <span class="k">for</span> <span class="n">o</span> <span class="ow">in</span> <span class="n">output</span><span class="p">],</span>
                                          <span class="n">allow_unused</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">gi</span><span class="p">,</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">grads_input</span><span class="p">,</span> <span class="n">diff_input_list</span><span class="p">):</span>
            <span class="k">if</span> <span class="n">gi</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">continue</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">gi</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">gi</span><span class="o">.</span><span class="n">layout</span> <span class="o">!=</span> <span class="n">torch</span><span class="o">.</span><span class="n">strided</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">gi</span><span class="o">.</span><span class="n">layout</span> <span class="o">!=</span> <span class="n">i</span><span class="o">.</span><span class="n">layout</span><span class="p">:</span>
                    <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;grad is incorrect layout (&#39;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">gi</span><span class="o">.</span><span class="n">layout</span><span class="p">)</span> <span class="o">+</span> <span class="s1">&#39; is not &#39;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">i</span><span class="o">.</span><span class="n">layout</span><span class="p">)</span> <span class="o">+</span> <span class="s1">&#39;)&#39;</span><span class="p">)</span>
                <span class="k">if</span> <span class="n">gi</span><span class="o">.</span><span class="n">layout</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo</span><span class="p">:</span>
                    <span class="k">if</span> <span class="n">gi</span><span class="o">.</span><span class="n">sparse_dim</span><span class="p">()</span> <span class="o">!=</span> <span class="n">i</span><span class="o">.</span><span class="n">sparse_dim</span><span class="p">():</span>
                        <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;grad is sparse tensor, but has incorrect sparse_dim&#39;</span><span class="p">)</span>
                    <span class="k">if</span> <span class="n">gi</span><span class="o">.</span><span class="n">dense_dim</span><span class="p">()</span> <span class="o">!=</span> <span class="n">i</span><span class="o">.</span><span class="n">dense_dim</span><span class="p">():</span>
                        <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;grad is sparse tensor, but has incorrect dense_dim&#39;</span><span class="p">)</span>
                <span class="n">gi</span> <span class="o">=</span> <span class="n">gi</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
                <span class="n">i</span> <span class="o">=</span> <span class="n">i</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="n">gi</span><span class="o">.</span><span class="n">eq</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">all</span><span class="p">():</span>
                <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;backward not multiplied by grad_output&#39;</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">gi</span><span class="o">.</span><span class="n">type</span><span class="p">()</span> <span class="o">!=</span> <span class="n">i</span><span class="o">.</span><span class="n">type</span><span class="p">():</span>
                <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s2">&quot;grad is incorrect type&quot;</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">gi</span><span class="o">.</span><span class="n">size</span><span class="p">()</span> <span class="o">!=</span> <span class="n">i</span><span class="o">.</span><span class="n">size</span><span class="p">():</span>
                <span class="k">return</span> <span class="n">fail_test</span><span class="p">(</span><span class="s1">&#39;grad is incorrect size&#39;</span><span class="p">)</span>

    <span class="k">return</span> <span class="kc">True</span></div>


<div class="viewcode-block" id="gradgradcheck"><a class="viewcode-back" href="../../../autograd.html#torch.autograd.gradgradcheck">[docs]</a><span class="k">def</span> <span class="nf">gradgradcheck</span><span class="p">(</span><span class="n">func</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">grad_outputs</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="mf">1e-6</span><span class="p">,</span> <span class="n">atol</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span> <span class="n">rtol</span><span class="o">=</span><span class="mf">1e-3</span><span class="p">,</span>
                  <span class="n">gen_non_contig_grad_outputs</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">raise_exception</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                  <span class="n">nondet_tol</span><span class="o">=</span><span class="mf">0.0</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Check gradients of gradients computed via small finite differences</span>
<span class="sd">    against analytical gradients w.r.t. tensors in :attr:`inputs` and</span>
<span class="sd">    :attr:`grad_outputs` that are of floating point type and with</span>
<span class="sd">    ``requires_grad=True``.</span>

<span class="sd">    This function checks that backpropagating through the gradients computed</span>
<span class="sd">    to the given :attr:`grad_outputs` are correct.</span>

<span class="sd">    The check between numerical and analytical gradients uses :func:`~torch.allclose`.</span>

<span class="sd">    .. note::</span>
<span class="sd">        The default values are designed for :attr:`input` and</span>
<span class="sd">        :attr:`grad_outputs` of double precision. This check will likely fail if</span>
<span class="sd">        they are of less precision, e.g., ``FloatTensor``.</span>

<span class="sd">    .. warning::</span>
<span class="sd">       If any checked tensor in :attr:`input` and :attr:`grad_outputs` has</span>
<span class="sd">       overlapping memory, i.e., different indices pointing to the same memory</span>
<span class="sd">       address (e.g., from :func:`torch.expand`), this check will likely fail</span>
<span class="sd">       because the numerical gradients computed by point perturbation at such</span>
<span class="sd">       indices will change values at all other indices that share the same</span>
<span class="sd">       memory address.</span>

<span class="sd">    Args:</span>
<span class="sd">        func (function): a Python function that takes Tensor inputs and returns</span>
<span class="sd">            a Tensor or a tuple of Tensors</span>
<span class="sd">        inputs (tuple of Tensor or Tensor): inputs to the function</span>
<span class="sd">        grad_outputs (tuple of Tensor or Tensor, optional): The gradients with</span>
<span class="sd">            respect to the function&#39;s outputs.</span>
<span class="sd">        eps (float, optional): perturbation for finite differences</span>
<span class="sd">        atol (float, optional): absolute tolerance</span>
<span class="sd">        rtol (float, optional): relative tolerance</span>
<span class="sd">        gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is</span>
<span class="sd">            ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the</span>
<span class="sd">            randomly generated gradient outputs are made to be noncontiguous</span>
<span class="sd">        raise_exception (bool, optional): indicating whether to raise an exception if</span>
<span class="sd">            the check fails. The exception gives more information about the</span>
<span class="sd">            exact nature of the failure. This is helpful when debugging gradchecks.</span>
<span class="sd">        nondet_tol (float, optional): tolerance for non-determinism. When running</span>
<span class="sd">            identical inputs through the differentiation, the results must either match</span>
<span class="sd">            exactly (default, 0.0) or be within this tolerance. Note that a small amount</span>
<span class="sd">            of nondeterminism in the gradient will lead to larger inaccuracies in</span>
<span class="sd">            the second derivative.</span>

<span class="sd">    Returns:</span>
<span class="sd">        True if all differences satisfy allclose condition</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">tupled_inputs</span> <span class="o">=</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>

    <span class="k">if</span> <span class="n">grad_outputs</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="c1"># If grad_outputs is not specified, create random Tensors of the same</span>
        <span class="c1"># shape, type, and device as the outputs</span>
        <span class="k">def</span> <span class="nf">randn_like</span><span class="p">(</span><span class="n">x</span><span class="p">):</span>
            <span class="n">y</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">testing</span><span class="o">.</span><span class="n">randn_like</span><span class="p">(</span><span class="n">x</span> <span class="k">if</span> <span class="n">x</span><span class="o">.</span><span class="n">is_floating_point</span><span class="p">()</span> <span class="k">else</span> <span class="n">x</span><span class="o">.</span><span class="n">double</span><span class="p">(),</span> <span class="n">memory_format</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">legacy_contiguous_format</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">gen_non_contig_grad_outputs</span><span class="p">:</span>
                <span class="n">y</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">testing</span><span class="o">.</span><span class="n">make_non_contiguous</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>
            <span class="k">return</span> <span class="n">y</span><span class="o">.</span><span class="n">requires_grad_</span><span class="p">()</span>
        <span class="n">outputs</span> <span class="o">=</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="n">tupled_inputs</span><span class="p">))</span>
        <span class="n">tupled_grad_outputs</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">randn_like</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">outputs</span><span class="p">)</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="n">tupled_grad_outputs</span> <span class="o">=</span> <span class="n">_as_tuple</span><span class="p">(</span><span class="n">grad_outputs</span><span class="p">)</span>

    <span class="n">num_outputs</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">tupled_grad_outputs</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">new_func</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">):</span>
        <span class="n">input_args</span> <span class="o">=</span> <span class="n">args</span><span class="p">[:</span><span class="o">-</span><span class="n">num_outputs</span><span class="p">]</span>
        <span class="n">grad_outputs</span> <span class="o">=</span> <span class="n">args</span><span class="p">[</span><span class="o">-</span><span class="n">num_outputs</span><span class="p">:]</span>
        <span class="n">outputs</span> <span class="o">=</span> <span class="n">_differentiable_outputs</span><span class="p">(</span><span class="n">func</span><span class="p">(</span><span class="o">*</span><span class="n">input_args</span><span class="p">))</span>
        <span class="n">input_args</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">x</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">input_args</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">x</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>
        <span class="n">grad_inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">autograd</span><span class="o">.</span><span class="n">grad</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">input_args</span><span class="p">,</span> <span class="n">grad_outputs</span><span class="p">,</span> <span class="n">create_graph</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">grad_inputs</span>

    <span class="k">return</span> <span class="n">gradcheck</span><span class="p">(</span><span class="n">new_func</span><span class="p">,</span> <span class="n">tupled_inputs</span> <span class="o">+</span> <span class="n">tupled_grad_outputs</span><span class="p">,</span> <span class="n">eps</span><span class="p">,</span> <span class="n">atol</span><span class="p">,</span> <span class="n">rtol</span><span class="p">,</span> <span class="n">raise_exception</span><span class="p">,</span>
                     <span class="n">nondet_tol</span><span class="o">=</span><span class="n">nondet_tol</span><span class="p">)</span></div>
</pre></div>

             </article>
             
            </div>
            <!-- Sphinx page footer: copyright notice + theme attribution -->
            <footer>
              <hr>
              <div role="contentinfo">
                <p>
                  &copy; Copyright 2019, Torch Contributors.
                </p>
              </div>
              <div>
                <!-- fix: use https (the http URL forces an insecure navigation) -->
                Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
              </div>
            </footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              
            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <!-- Sphinx / theme JavaScript. The obsolete type="text/javascript"
            attribute is dropped (it is the HTML5 default). Load order matters:
            jQuery must precede the scripts that use it. -->
       <script id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script>
       <script src="../../../_static/jquery.js"></script>
       <script src="../../../_static/underscore.js"></script>
       <script src="../../../_static/doctools.js"></script>
       <script src="../../../_static/language_data.js"></script>

  <script src="../../../_static/js/vendor/popper.min.js"></script>
  <script src="../../../_static/js/vendor/bootstrap.min.js"></script>
  <!-- NOTE(review): third-party CDN script loaded without an integrity (SRI)
       hash — consider adding integrity + crossorigin attributes -->
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script src="../../../_static/js/theme.js"></script>

  <script>
      // Enable the RTD theme's sticky/side navigation once the DOM is ready.
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 
<!-- Google Analytics (legacy analytics.js) page-view tracking -->
<script>
  // Standard GA bootstrap: defines window.ga as a command queue and injects
  // the analytics.js loader asynchronously before the first <script> tag.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Register the tracker and record this page view.
  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<!-- gtag.js (Google Analytics) tracker -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<script>
  // Commands are buffered in window.dataLayer until gtag.js finishes loading.
  window.dataLayer = window.dataLayer || [];

  function gtag(){dataLayer.push(arguments);}

  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<!-- Google Ads conversion-tracking pixel (1x1; alt="" marks it decorative) -->
<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <!-- Three-column promo strip linking to Docs / Tutorials / Resources -->
  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <a href="https://pytorch.org/" class="footer-logo"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <!-- fix: target="_blank" links get rel="noopener noreferrer" so the
                 opened page cannot reach back through window.opener -->
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <div id="mc_embed_signup">
                <!-- Mailchimp e-mail subscription form; submits in a new tab.
                     fix: rel="noopener" stops the target page from scripting this one. -->
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  rel="noopener"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <!-- NOTE(review): label is display:none, so only assistive tech
                           gets it; placeholder alone is not a visible label — confirm
                           with design before exposing it -->
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address" autocomplete="email">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>    <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->

                    <!-- Mailchimp honeypot field: off-screen, removed from tab order -->
                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <!-- fix: icon-only submit (empty value) needs an accessible name -->
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button" aria-label="Subscribe">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <div class="footer-social-icons">
            <!-- fix: icon-only links get accessible names + rel="noopener noreferrer" -->
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <!-- GDPR cookie notice; the close icon is wired up by theme JS -->
  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <!-- fix: <img> requires an alt attribute; this icon dismisses the banner -->
    <img class="close-button" src="../../../_static/images/pytorch-x.svg" alt="Close">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <!-- Slide-out mobile navigation; shown/hidden by theme JS (mobileMenu.bind) -->
  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <!-- NOTE(review): href="#" acting as a button; theme JS binds via
               data-behavior — presumably keyboard handling lives there, confirm -->
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <!-- "active" marks the current site section (Docs) -->
          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <script type="text/javascript" src="../../../_static/js/vendor/anchor.min.js"></script>

  <script type="text/javascript">
    // Initialise all pytorch.org theme behaviours once the DOM is ready.
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Remove any empty p tags that Sphinx adds
      $("[data-tags='null']").remove();

      // Add class to links that have code blocks, since we cannot create links in code blocks
      $("article.pytorch-article a span.pre").each(function(e) {
        $(this).closest("a").addClass("has-code");
      });
    })
  </script>
</body>
</html>