<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <meta name="author" content="Rogerspy">
  <link rel="canonical" href="https://pytorch-zh.gitee.io/faq/">
  <link rel="shortcut icon" href="/pytorch-zh/img/favicon.ico">
  <title>PyTorch FAQ - Pytorch 中文文档（1.4.0）</title>
  <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700" />

  <link rel="stylesheet" href="../css/theme.css" />
  <link rel="stylesheet" href="../css/theme_extra.css" />
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css" />
  
  <script>
    // Current page data
    // NOTE(review): standard MkDocs page metadata — presumably consumed by the
    // theme/search scripts loaded below; verify against theme.js before renaming.
    var mkdocs_page_name = "PyTorch FAQ";  // human-readable page title
    var mkdocs_page_input_path = "faq.md";  // source markdown file for this page
    var mkdocs_page_url = "/faq/";  // site-relative URL of this page
  </script>
  
  <script src="../js/jquery-2.1.1.min.js" defer></script>
  <script src="../js/modernizr-2.8.3.min.js" defer></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
  <script>hljs.initHighlightingOnLoad();</script> 
  
</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
    <div class="wy-side-scroll">
      <div class="wy-side-nav-search">
	    <img src="/pytorch-zh/img/logo.svg" alt="PyTorch logo">
        <a href=".." class="icon icon-home"> Pytorch 中文文档（1.4.0）</a>
        <div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" title="Type search term here" />
  </form>
</div>
      </div>

      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="..">主页</a>
                    </li>
                </ul>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../get-started/">60分钟快速入门</a>
                    </li>
                </ul>
                <ul class="current">
                    <li class="toctree-l1 current"><a class="reference internal current" href="./">PyTorch FAQ</a>
    <ul class="current">
    </ul>
                    </li>
                </ul>
                <p class="caption"><span class="caption-text">两种基本结构</span></p>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../torchnn/parameters/">参数 Parameters</a>
                    </li>
                    <li class="toctree-l1"><a class="reference internal" href="#">容器 Containers</a>
    <ul>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/module/">Module</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/sequential/">Sequential</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/modulelist/">Modulelist</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/moduledict/">Moduledict</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/parameterlist/">Parameterlist</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/parameterdict/">Parameterdict</a>
                </li>
    </ul>
                    </li>
                </ul>
                <p class="caption"><span class="caption-text">网络层</span></p>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="#">卷积层</a>
    <ul>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/conv1d/">Conv1d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/conv2d/">Conv2d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/conv3d/">Conv3d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/convtranspose1d/">Convtranspose1d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/convtranspose2d/">Convtranspose2d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/convtranspose3d/">Convtranspose3d</a>
                </li>
    </ul>
                    </li>
                </ul>
      </div>
    </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="..">Pytorch 中文文档（1.4.0）</a>
      </nav>

      
      <div class="wy-nav-content">
        <div class="rst-content">
          <div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="..">Docs</a> &raquo;</li>
    
      
    
    <li>PyTorch FAQ</li>
    <li class="wy-breadcrumbs-aside">
      
    </li>
  </ul>
  
  <hr/>
</div>
          <div role="main">
            <div class="section">
              
                <h1>PyTorch FAQ: 常见问题解答</h1>

<div class="toc">
<ul>
<li><a href="#cuda-2">我的模型报告“CUDA 运行时错误（2）：内存不足”</a></li>
<li><a href="#gpu">我的 GPU 显存没有正确释放</a></li>
<li><a href="#_1">我的数据加载器返回相同的随机数</a></li>
<li><a href="#_2">我的循环神经网络不能使用数据并行</a></li>
<li><a href="#register_buffer-register_parameter">register_buffer 和register_parameter 有什么不同？</a></li>
<li><a href="#pytorch-hook">PyTorch 中的钩子（Hook）有何作用？</a></li>
</ul>
</div>
<h2 id="cuda-2">我的模型报告“CUDA 运行时错误（2）：内存不足”<a class="headerlink" href="#cuda-2" title="Permanent link">&para;</a></h2>
<p>正如错误提示指出的你的GPU耗尽了。由于你经常会在 PyTorch 中处理大批量数据，因此一个小的错误就可能导致你的程序耗尽你所有的显存。幸运的是，这种情况下修复程序通常比较简单，以下是一些常见的做法：</p>
<ul>
<li><strong>不要在整个训练循环中累计历史记录。</strong> 默认情况下，涉及需要梯度的变量的计算会保留历史记录。这就意味着你应该避免在生命周期超出训练循环的计算中使用此类变量，比如在追踪统计信息的时候。相反，你应该分离（detach）变量或访问其基础数据。</li>
</ul>
<p>有时可微变量出现时不太明显。考虑下面的训练循环：</p>
<div class="codehilite"><pre><span></span><code><span class="n">total_loss</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">10000</span><span class="p">):</span>
    <span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
    <span class="n">outputs</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>
    <span class="n">loss</span> <span class="o">=</span> <span class="n">criterion</span><span class="p">(</span><span class="n">outputs</span><span class="p">)</span>
    <span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
    <span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>
    <span class="n">total_loss</span> <span class="o">+=</span> <span class="n">loss</span>
</code></pre></div>

<p>这里 <code>total_loss</code> 在整个训练过程是累加的，因为 <code>loss</code> 是一个带有 <code>autograd</code> 的可微变量。你可以通过将 <code>total_loss</code> 改写成 <code>total_loss += float(loss)</code> 来修复。  </p>
<p>此问题的另外一个案例：<a href="https://discuss.pytorch.org/t/resolved-gpu-out-of-memory-error-with-batch-size-1/3719">GPU out of memory error with batch size = 1</a></p>
<ul>
<li><strong>不要使用不需要的张量和变量。</strong> 如果将 <code>Tensor</code> 或者 <code>Variable</code> 赋值给局部变量，Python 不会自动释放内存，直到超出作用域范围。你可以通过 <code>del x</code> 手动释放内存。类似的，如果将张量或者变量赋值给一个对象的成员变量，它同样要等到该对象超出作用域才会释放。只有不保留无用的临时变量才能使你的内存使用达到最佳。（注：这主要是 Python 自身的垃圾管理机制以及 Python 中的变量生命周期决定的，与 PyTorch 关系不大。因此，一个良好的编码规范也是很重要的。）</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">5</span><span class="p">):</span>
    <span class="n">intermediate</span> <span class="o">=</span> <span class="n">f</span><span class="p">(</span><span class="n">inputs</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
    <span class="n">result</span> <span class="o">+=</span> <span class="n">g</span><span class="p">(</span><span class="n">intermediate</span><span class="p">)</span>
<span class="n">output</span> <span class="o">=</span> <span class="n">h</span><span class="p">(</span><span class="n">result</span><span class="p">)</span>
</code></pre></div>

<p>这里 <code>intermediate</code> 直到 for 循环结束都还存在，因为它的作用域超出了循环本身，要在使用完以后提早使用 <code>del intermediate</code> 释放掉它。</p>
<ul>
<li><strong>不要对太长的序列使用RNN。</strong> 通过 RNN 进行反向传播所需要的内存和 RNN 的输入的长度成正比。因此，如果你给一个 RNN 传入过长的序列的话很容易耗尽内存。</li>
</ul>
<p>这种现象的术语称之为 <strong><em><a href="https://en.wikipedia.org/wiki/Backpropagation_through_time">Backpropagation through time (BPTT)</a></em></strong>，有很多关于如何实现截断 <em>BPTT （truncated BPTT）</em> 的参考资料，包括<a href="https://github.com/pytorch/examples/tree/master/word_language_model">词语言模型</a>。截断是通过<a href="https://discuss.pytorch.org/t/help-clarifying-repackage-hidden-in-word-language-model/226">这篇文章</a>描述的 <code>repackage</code> 函数处理的。</p>
<ul>
<li><strong>不要使用太大的线性层。</strong> 线性层 <code>nn.Linear(m, n)</code> 占用 <script type="math/tex">O(nm)</script> 大小的内存：也就是说权重消耗的内存与特征数成二阶线性关系（或者说与特征数的平方成正比）。这很容易耗尽你的内存（记住，你的内存至少应该是权重所占内存的两倍，因为你还需要存储梯度）。</li>
</ul>
<h2 id="gpu">我的 GPU 显存没有正确释放<a class="headerlink" href="#gpu" title="Permanent link">&para;</a></h2>
<p>PyTorch 使用缓存内存分配器来加速内存分配。因此，<code>nvidia-smi</code> 通常不能正确反映真实的内存使用情况。参看 <a href="https://pytorch.org/docs/stable/notes/cuda.html#cuda-memory-management"> GPU 内存管理机制</a> 了解详情。</p>
<p>如果你的 Python 进程已经退出了但 GPU 显存还没有释放的话，可能是一些 Python 子进程仍然在运行。你可以通过 <code>ps -elf | grep python</code> 来找到它们，然后用 <code>kill -9 [pid]</code> 来手动结束这些进程。</p>
<h2 id="_1">我的数据加载器返回相同的随机数<a class="headerlink" href="#_1" title="Permanent link">&para;</a></h2>
<p>你可能使用了其他的库在数据集中生成随机数。例如当加载器通过 <code>fork</code> 启动子进程的时候， Numpy的RNG会被复制。查看 <a href="https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader"><code>torch.utils.data.DataLoader</code></a> 文档了解如何使用 <code>worker_init_fn</code> 来正确地设置随机种子。  </p>
<h2 id="_2">我的循环神经网络不能使用数据并行<a class="headerlink" href="#_2" title="Permanent link">&para;</a></h2>
<p>在 <code>Module</code> 中利用 <code>DataParallel</code> 或者 <code>data_parallel()</code> 使用 <code>pack sequence -&gt; recurrent network -&gt; unpack sequence</code> 模式是比较微妙的。输入到每个设备上的每个 <code>forward()</code> 中的数据只是整个输入数据的一部分。因为拆包操作 <code>torch.nn.utils.rnn.pad_packed_sequence()</code> 默认情况下仅填充其看到的最长的输入，即该设备上最长的输入。因此，将结果汇总到一起时会发生尺寸不匹配的情况。你可以利用 <code>pad_packed_sequence()</code> 的 <code>total_length</code> 参数来确保 <code>forward()</code> 调用返回相同长度的序列。比如：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">from</span> <span class="nn">torch.nn.utils.rnn</span> <span class="kn">import</span> <span class="n">pack_padded_sequence</span><span class="p">,</span> <span class="n">pad_packed_sequence</span>

<span class="k">class</span> <span class="nc">MyModule</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="c1"># ... __init__</span>
    <span class="c1"># 其他的一些方法</span>

    <span class="c1"># padded_input: [B x T x *]</span>
    <span class="c1"># B: batch_size</span>
    <span class="c1"># T: 最大长度</span>
    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">padded_input</span><span class="p">,</span> <span class="n">input_lengths</span><span class="p">):</span>
        <span class="n">total_length</span> <span class="o">=</span> <span class="n">padded_input</span><span class="o">.</span><span class="n">size</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>  <span class="c1"># 获取最大长度</span>
        <span class="n">packed_input</span> <span class="o">=</span> <span class="n">pack_padded_sequence</span><span class="p">(</span>
            <span class="n">padded_input</span><span class="p">,</span>
            <span class="n">input_lengths</span><span class="p">,</span>
            <span class="n">batch_first</span><span class="o">=</span><span class="kc">True</span>
        <span class="p">)</span>
        <span class="n">packed_output</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">my_lstm</span><span class="p">(</span><span class="n">packed_input</span><span class="p">)</span>
        <span class="n">output</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">pad_packed_sequence</span><span class="p">(</span>
            <span class="n">packed_output</span><span class="p">,</span>
            <span class="n">batch_first</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
            <span class="n">total_length</span><span class="o">=</span><span class="n">total_length</span>
        <span class="p">)</span>
        <span class="k">return</span> <span class="n">output</span>

<span class="n">m</span> <span class="o">=</span> <span class="n">MyModule</span><span class="p">()</span><span class="o">.</span><span class="n">cuda</span><span class="p">()</span>
<span class="n">dp_m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">DataParallel</span><span class="p">(</span><span class="n">m</span><span class="p">)</span>
</code></pre></div>

<p>此外，当批次维度是第 1 维（即 <code>batch_first = False</code>）且使用 <code>DataParallel</code> 模式时需要格外小心。这种情况下 <code>pack_padded_sequence</code> 第一个参数 <code>padded_input</code> 的形状是 <code>[T x B x *]</code>，且沿着第 1 维（第二个维度，从 0 开始计数）分散到各个设备，但是第二个参数 <code>input_lengths</code> 的形状应该是 <code>[B]</code>，且沿着第 0 维分散。我们还需要额外的代码来控制张量的形状。</p>
<h2 id="register_buffer-register_parameter"><code>register_buffer</code> 和<code>register_parameter</code> 有什么不同？<a class="headerlink" href="#register_buffer-register_parameter" title="Permanent link">&para;</a></h2>
<p>参考：<a href="https://zhuanlan.zhihu.com/p/89442276">Pytorch模型中的parameter与buffer</a>，ccilery，知乎</p>
<ul>
<li><strong>模型保存</strong></li>
</ul>
<p>在 Pytorch 中一种模型保存和加载的方式如下:</p>
<div class="codehilite"><pre><span></span><code><span class="c1"># save</span>
<span class="n">torch</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">state_dict</span><span class="p">(),</span> <span class="n">PATH</span><span class="p">)</span>

<span class="c1"># load</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">MyModel</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">model</span><span class="o">.</span><span class="n">load_state_dict</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">PATH</span><span class="p">))</span>
<span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
</code></pre></div>

<p>可以看到模型保存的是 <code>model.state_dict()</code> 的返回对象。 <code>model.state_dict()</code> 的返回对象是一个 <code>OrderedDict</code> ，它以键值对的形式包含模型中需要保存下来的参数，例如:</p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">MyModule</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_size</span><span class="p">,</span> <span class="n">output_size</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MyModule</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">lin</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">input_size</span><span class="p">,</span> <span class="n">output_size</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">lin</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>

<span class="n">module</span> <span class="o">=</span> <span class="n">MyModule</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">module</span><span class="o">.</span><span class="n">state_dict</span><span class="p">())</span>
</code></pre></div>

<p>输出结果:</p>
<div class="codehilite"><pre><span></span><code><span class="n">OrderedDict</span><span class="p">([(</span><span class="s1">&#39;lin.weight&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.3347</span><span class="p">,</span>  <span class="mf">0.1639</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1005</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.3017</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.4824</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0583</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.2385</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.2486</span><span class="p">]])),</span> <span class="p">(</span><span class="s1">&#39;lin.bias&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">0.2472</span><span class="p">,</span>  <span class="mf">0.1320</span><span class="p">]))])</span>
</code></pre></div>

<p>模型中的参数就是线性层的 weight 和 bias。</p>
<ul>
<li><strong>Parameter 和 buffer</strong></li>
</ul>
<p>模型中需要保存下来的参数包括两种:</p>
<blockquote>
<ol>
<li>一种是反向传播需要被优化器更新的，称之为 <em>parameter</em></li>
<li>一种是反向传播不需要被优化器更新，称之为 <em>buffer</em></li>
</ol>
</blockquote>
<p>第一种参数我们可以通过 <code>model.parameters()</code> 返回；第二种参数我们可以通过 <code>model.buffers()</code> 返回。因为我们的模型保存的是 <code>state_dict</code> 返回的 <code>OrderedDict</code>，所以这两种参数不仅要满足是否需要被更新的要求，还需要被保存到 <code>OrderedDict</code>。</p>
<ul>
<li><strong>创建 Parameter 和 buffer</strong></li>
</ul>
<p>创建 <em>Parameter</em> 有两种方式:</p>
<blockquote>
<ol>
<li>我们可以直接将模型的成员变量（<em>self.xxx</em>）通过 <code>nn.Parameter()</code> 创建，这样会自动注册到 parameters 中，可以通过 <code>model.parameters()</code> 返回，并且这样创建的参数会自动保存到 <code>OrderedDict</code> 中去；</li>
<li>通过 <code>nn.Parameter()</code> 创建普通 <code>Parameter</code> 对象，不作为模型的成员变量，然后将 <code>Parameter</code> 对象通过 <code>register_parameter()</code> 进行注册，可以通过 <code>model.parameters()</code> 返回，注册后的参数也会自动保存到 <code>OrderedDict</code> 中去;</li>
</ol>
</blockquote>
<p>创建 <em>buffer</em> 我们需要创建 <em>tensor</em>, 然后将 <em>tensor</em> 通过 <code>register_buffer()</code> 进行注册，可以通过 <code>model.buffers()</code> 返回，注册完后参数也会自动保存到 <code>OrderedDict</code> 中去。</p>
<ul>
<li><strong>示例</strong></li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">MyModel</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MyModel</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="n">buffer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>  <span class="c1"># tensor</span>
        <span class="n">param1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>  <span class="c1"># 普通 Parameter 对象</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">register_buffer</span><span class="p">(</span><span class="s1">&#39;my_buffer&#39;</span><span class="p">,</span> <span class="n">buffer</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s2">&quot;param1&quot;</span><span class="p">,</span> <span class="n">param1</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">param2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>  <span class="c1"># 成员变量</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="c1"># 可以通过 self.param 和 self.my_buffer 访问</span>
        <span class="k">pass</span>

<span class="n">model</span> <span class="o">=</span> <span class="n">MyModel</span><span class="p">()</span>
<span class="k">for</span> <span class="n">param</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">parameters</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">param</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;----------------&quot;</span><span class="p">)</span>
<span class="k">for</span> <span class="n">buffer</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">buffers</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">buffer</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;----------------&quot;</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">state_dict</span><span class="p">())</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>                                                                     
<span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.2999</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.7543</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.6188</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">2.3201</span><span class="p">,</span>  <span class="mf">0.8618</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.8242</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">0.8877</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.4337</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.9099</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>                                 
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>                                                                     
<span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.5617</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1148</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.3498</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">0.4099</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.0035</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.5538</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">0.3747</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1125</span><span class="p">,</span>  <span class="mf">1.5162</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>                                 
<span class="o">----------------</span>                                                                          
<span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.0102</span><span class="p">,</span>  <span class="mf">0.6938</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0352</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">0.4702</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.4319</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.2221</span><span class="p">]])</span>                                                     
<span class="o">----------------</span>                                                                          
<span class="n">OrderedDict</span><span class="p">([(</span><span class="s1">&#39;param1&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.2999</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.7543</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.6188</span><span class="p">],</span>                              
        <span class="p">[</span> <span class="mf">2.3201</span><span class="p">,</span>  <span class="mf">0.8618</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.8242</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">0.8877</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.4337</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.9099</span><span class="p">]])),</span> <span class="p">(</span><span class="s1">&#39;param2&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.5617</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1148</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.3498</span><span class="p">],</span>   
        <span class="p">[</span> <span class="mf">0.4099</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.0035</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.5538</span><span class="p">],</span>                                                      
        <span class="p">[</span> <span class="mf">0.3747</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1125</span><span class="p">,</span>  <span class="mf">1.5162</span><span class="p">]])),</span> <span class="p">(</span><span class="s1">&#39;my_buffer&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.0102</span><span class="p">,</span>  <span class="mf">0.6938</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0352</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.4702</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.4319</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.2221</span><span class="p">]]))])</span>                                                  
</code></pre></div>

<ul>
<li><strong>两种创建 Parameter 的方式有什么区别呢？</strong></li>
</ul>
<blockquote>
<ol>
<li>使用 <code>register_parameter</code> 方式需要传递一个字符串作为参数，直接创建则不需要；</li>
<li>个人代码风格</li>
</ol>
</blockquote>
<ul>
<li><strong>为什么不把参数都设置为 <code>nn.Parameter</code> 类型，把不需要更新参数的设置 <code>requires_grad=False?</code></strong></li>
</ul>
<blockquote>
<ol>
<li>
<p>如果把 <code>nn.Parameter</code> 的 <code>requires_grad</code>设置成 <code>False</code>，有可能会造成一些意想不到的问题，比如 优化器报错：<code>optimizing a parameter that doesn't require gradients</code>，详细参看<a href="https://github.com/pytorch/pytorch/issues/679">Allow optimizers to skip nn.Parameters that have requires_grad=False #679</a></p>
</li>
<li>
<p><em>buffer</em> 不会出现在 <code>.parameters()</code> 里面，如果你在实验过程中希望对比不同的模型参数，而对于不参与训练的参数也作为比较目标的话可以使用上面的方法，比如在实现 <em>Transformer</em> 的时候，位置编码是不参与训练的，但是我们可以把它作为模型参数保存，用于对比不同模型的参数量。</p>
</li>
<li>这种编码方法使代码的可读性不强，这种方法很容易给其他人带来困惑。</li>
<li>对于 <code>torch.Tensor</code> 来说，它不会出现在 <code>OrderedDict</code> 中，因此无法存储和重新加载。</li>
</ol>
</blockquote>
<ul>
<li><strong>为什么不直接将不需要进行参数修改的变量作为模型类的成员变量就好了，还要进行注册?</strong></li>
</ul>
<blockquote>
<ol>
<li>不进行注册，参数不能保存到 <code>OrderedDict</code>，也就无法进行保存</li>
<li>模型进行参数在 CPU 和 GPU 移动时, 执行 <code>model.to(device)</code> ，注册后的参数也会自动进行设备移动</li>
</ol>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">MyModel</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MyModel</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">my_tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span> <span class="c1"># 参数直接作为模型类成员变量</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">register_buffer</span><span class="p">(</span><span class="s1">&#39;my_buffer&#39;</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">))</span> <span class="c1"># 参数注册为 buffer</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">my_param</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
            <span class="k">return</span> <span class="n">x</span>

<span class="n">model</span> <span class="o">=</span> <span class="n">MyModel</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">state_dict</span><span class="p">())</span>
<span class="n">model</span><span class="o">.</span><span class="n">cuda</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">my_tensor</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">my_buffer</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">OrderedDict</span><span class="p">([(</span><span class="s1">&#39;my_param&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">0.9022</span><span class="p">])),</span> <span class="p">(</span><span class="s1">&#39;my_buffer&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">1.4827</span><span class="p">]))])</span>
<span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">0.1872</span><span class="p">])</span>
<span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">1.4827</span><span class="p">],</span> <span class="n">device</span><span class="o">=</span><span class="s1">&#39;cuda:0&#39;</span><span class="p">)</span>
</code></pre></div>

<p>可以看到模型类的成员变量不在 <code>OrderedDict</code> 中，不能进行保存；模型在进行设备移动时，模型类的成员变量没有进行移动。</p>
</blockquote>
<h2 id="pytorch-hook">PyTorch 中的钩子（Hook）有何作用？<a class="headerlink" href="#pytorch-hook" title="Permanent link">&para;</a></h2>
<p>参考：<a href="https://www.zhihu.com/question/61044004">pytorch中的钩子（Hook）有何作用？</a>，知乎用户，知乎</p>
<p>首先明确一下，为什么需要用 <em>Hook</em>？</p>
<p>假设有一个函数：<script type="math/tex">x \in \mathbb{R}^2,\; y = x + 2,\; z = \tfrac{1}{2}(y_1^2 + y_2^2)</script> ，你想通过梯度下降求最小值。在 PyTorch 中很容易实现：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>

<span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">x</span> <span class="o">+</span> <span class="mi">2</span>
<span class="n">z</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">pow</span><span class="p">(</span><span class="n">y</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">1e-3</span>
<span class="n">z</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">x</span><span class="o">.</span><span class="n">data</span> <span class="o">-=</span> <span class="n">lr</span><span class="o">*</span><span class="n">x</span><span class="o">.</span><span class="n">grad</span><span class="o">.</span><span class="n">data</span>
</code></pre></div>

<p>但问题是，如果我想要求中间变量 <script type="math/tex">y</script> 的梯度，系统会返回错误。</p>
<p>事实上，如果你输入：</p>
<div class="codehilite"><pre><span></span><code><span class="nb">type</span><span class="p">(</span><span class="n">y</span><span class="o">.</span><span class="n">grad</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">NoneType</span>
</code></pre></div>

<p>因为对于中间变量，一旦它们完成了自身反传的使命，就会被释放掉。</p>
<p>此时，<em>hook</em> 就派上用场了。简而言之，<code>register_hook</code> 的作用是，当反传时，除了完成原有的反传，额外多完成一些任务。你可以定义一个中间变量的 <em>hook</em>，将它的梯度值打印出来，当然你也可以定义一个全局列表，将每次的梯度值添加到里面去。</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>

<span class="n">grad_list</span> <span class="o">=</span> <span class="p">[]</span>

<span class="k">def</span> <span class="nf">print_grad</span><span class="p">(</span><span class="n">grad</span><span class="p">):</span>
    <span class="n">grad_list</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">grad</span><span class="p">)</span>

<span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">x</span> <span class="o">+</span> <span class="mi">2</span>
<span class="n">z</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">pow</span><span class="p">(</span><span class="n">y</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">1e-3</span>
<span class="n">y</span><span class="o">.</span><span class="n">register_hook</span><span class="p">(</span><span class="n">print_grad</span><span class="p">)</span>
<span class="n">z</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">x</span><span class="o">.</span><span class="n">data</span> <span class="o">-=</span> <span class="n">lr</span><span class="o">*</span><span class="n">x</span><span class="o">.</span><span class="n">grad</span><span class="o">.</span><span class="n">data</span>
</code></pre></div>

<p>当你训练一个网络，想要提取中间层的参数、或者特征图的时候，<em>hook</em> 就能派上用场了。</p>
<p>更多内容可参考：</p>
<ol>
<li><a href="https://www.kaggle.com/sironghuang/understanding-pytorch-hooks">Understanding Pytorch hooks</a></li>
<li><a href="https://oldpan.me/archives/pytorch-autograd-hook">Pytorch中autograd以及hook函数详解</a></li>
<li><a href="https://discuss.pytorch.org/t/understanding-register-forward-pre-hook-and-register-backward-hook/61457">Understanding <code>register_forward_pre_hook</code> and <code>register_backward_hook</code></a></li>
</ol>
              
            </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../torchnn/parameters/" class="btn btn-neutral float-right" title="参数 Parameters">Next <span class="icon icon-circle-arrow-right"></span></a>
      
      
        <a href="../get-started/" class="btn btn-neutral" title="60分钟快速入门"><span class="icon icon-circle-arrow-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <!-- Copyright etc -->
    
      <p>©2020 Rogerspy. All rights reserved.</p>
    
  </div>

  Built with <a href="https://www.mkdocs.org/">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
      
        </div>
      </div>

    </section>

  </div>

  <div class="rst-versions" role="note" aria-label="versions">
    <span class="rst-current-version" data-toggle="rst-current-version">
      
      
        <span><a href="../get-started/" style="color: #fcfcfc;">&laquo; Previous</a></span>
      
      
        <span style="margin-left: 15px"><a href="../torchnn/parameters/" style="color: #fcfcfc">Next &raquo;</a></span>
      
    </span>
</div>
    <script>var base_url = '..';</script>
    <script src="../js/theme.js" defer></script>
      <script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML" defer></script>
      <script src="../search/main.js" defer></script>
    <!-- Theme bootstrap: once every deferred theme script has loaded,
         enable the Read-the-Docs sticky/collapsible sidebar navigation.
         Fix: the `defer` attribute is ignored on inline scripts per the
         HTML spec (it only applies to scripts with `src`), so it has been
         removed; `window.onload` already fires after deferred scripts run,
         which is why SphinxRtdTheme is guaranteed to be defined here. -->
    <script>
        window.onload = function () {
            SphinxRtdTheme.Navigation.enable(true);
        };
    </script>

</body>
</html>
