<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <meta name="author" content="Rogerspy">
  <link rel="canonical" href="https://pytorch-zh.gitee.io/torchnn/module/">
  <link rel="shortcut icon" href="/pytorch-zh/img/favicon.ico">
  <title>Module - Pytorch 中文文档（1.4.0）</title>
  <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700" />

  <link rel="stylesheet" href="../../css/theme.css" />
  <link rel="stylesheet" href="../../css/theme_extra.css" />
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css" />
  
  <script>
    // MkDocs page metadata consumed by the theme's search/navigation JS.
    var mkdocs_page_name = "Module";
    var mkdocs_page_input_path = "torchnn\\module.md";
    var mkdocs_page_url = "/torchnn/module/";
  </script>
  
  <script src="../../js/jquery-2.1.1.min.js" defer></script>
  <script src="../../js/modernizr-2.8.3.min.js" defer></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
  <script>hljs.initHighlightingOnLoad();</script> 
  
</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
    <div class="wy-side-scroll">
      <div class="wy-side-nav-search">
	    <img src="/pytorch-zh/img/logo.svg" alt="PyTorch logo">
        <a href="../.." class="icon icon-home"> Pytorch 中文文档（1.4.0）</a>
        <div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" title="Type search term here" />
  </form>
</div>
      </div>

      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../..">主页</a>
                    </li>
                </ul>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../../get-started/">60分钟快速入门</a>
                    </li>
                </ul>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../../faq/">PyTorch FAQ</a>
                    </li>
                </ul>
                <p class="caption"><span class="caption-text">两种基本结构</span></p>
                <ul class="current">
                    <li class="toctree-l1"><a class="reference internal" href="../parameters/">参数 Parameters</a>
                    </li>
                    <li class="toctree-l1 current"><a class="reference internal current" href="#">容器 Containers</a>
    <ul class="current">
                <li class="toctree-l2 current"><a class="reference internal current" href="./">Module</a>
    <ul class="current">
    <li class="toctree-l3"><a class="reference internal" href="#add_module">add_module</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#apply">apply</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#buffers">buffers</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#children">children</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#cpu">cpu</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#cuda">cuda</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#double">double</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#dump_patches">dump_patches</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#eval">eval</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#extra_repr">extra_repr</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#float">float</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#forward">forward</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#half">half</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#load_state_dict">load_state_dict</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#modules">modules</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#named_buffers">named_buffers</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#named_children">named_children</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#named_modules">named_modules</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#named_parameters">named_parameters</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#parameters">parameters</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#register_backward_hook">register_backward_hook</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#regitster_buffer">register_buffer</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#register_forward_hook">register_forward_hook</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#register_forward_pre_hook">register_forward_pre_hook</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#register_parameter">register_parameter</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#requires_grad_">requires_grad_</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#state_dict">state_dict</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#to">to</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#train">train</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#type">type</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#zero_grad">zero_grad</a>
    </li>
    </ul>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../sequential/">Sequential</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../modulelist/">Modulelist</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../moduledict/">Moduledict</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../parameterlist/">Parameterlist</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../parameterdict/">Parameterdict</a>
                </li>
    </ul>
                    </li>
                </ul>
                <p class="caption"><span class="caption-text">网络层</span></p>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="#">卷积层</a>
    <ul>
                <li class="toctree-l2"><a class="reference internal" href="../conv1d/">Conv1d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../conv2d/">Conv2d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../conv3d/">Conv3d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../convtranspose1d/">Convtranspose1d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../convtranspose2d/">Convtranspose2d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../convtranspose3d/">Convtranspose3d</a>
                </li>
    </ul>
                    </li>
                </ul>
      </div>
    </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="../..">Pytorch 中文文档（1.4.0）</a>
      </nav>

      
      <div class="wy-nav-content">
        <div class="rst-content">
          <div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="../..">Docs</a> &raquo;</li>
    
      
        
          <li>两种基本结构 &raquo;</li>
        
      
        
          <li>容器 Containers &raquo;</li>
        
      
    
    <li>Module</li>
    <li class="wy-breadcrumbs-aside">
      
    </li>
  </ul>
  
  <hr/>
</div>
          <div role="main">
            <div class="section">
              
                <h2 id="module">Module<a class="headerlink" href="#module" title="Permanent link">&para;</a></h2>
<div class='important'><code>class torch.nn.Module</code></div>

<p>所有神经网络模块的基类。</p>
<p>你的模型也应该以它为基类。</p>
<p>模块中可以包含其他模块，从而将它们嵌套成树形结构。你也可以将子模块分配给常规属性。</p>
<div class='note'>实际上这个类有点像 Keras 里面的函数式模型接口。容器里面有另外一个类似 Keras 序贯模型接口的模块 <code>Sequential</code>，以及这两种模式对应的列表和字典形式。下面我们一一介绍。</div>

<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="kn">import</span> <span class="nn">torch.nn.functional</span> <span class="k">as</span> <span class="nn">F</span>

<span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
</code></pre></div>

<p>以这种方式分配的子模块会被注册，并且当你调用 <code>to()</code> 方法的时候，它们的参数也会被转化。</p>
<hr />
<h3 id="add_module">add_module<a class="headerlink" href="#add_module" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">add_module</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">module</span><span class="p">)</span>
</code></pre></div>

<p>给当前模块添加孩子模块。</p>
<p>可以使用给定名称将模块作为属性访问。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>name（str）</em>：孩子模块的名称。可以通过给定名称从当前模块访问孩子模块。</li>
<li><em>module（Module）</em>：需要添加的孩子模块。    </li>
</ul>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;conv2&#39;</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
</code></pre></div>

<p>我们可以使用 <code>.named_modules()</code> 来获得刚刚给定的每个层的名称：</p>
<div class="codehilite"><pre><span></span><code><span class="n">n</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="c1"># 获取名称</span>
<span class="nb">print</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="n">n</span><span class="o">.</span><span class="n">named_modules</span><span class="p">()))</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;=========&#39;</span><span class="p">)</span>
<span class="c1"># 或者</span>
<span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">module</span> <span class="ow">in</span> <span class="n">n</span><span class="o">.</span><span class="n">named_modules</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">name</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="p">[(</span><span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="n">Net</span><span class="p">(</span>
    <span class="p">(</span><span class="n">conv1</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
    <span class="p">(</span><span class="n">conv2</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
  <span class="p">)),</span>
 <span class="p">(</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))),</span>
 <span class="p">(</span><span class="s1">&#39;conv2&#39;</span><span class="p">,</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">)))]</span>
<span class="o">=========</span>
<span class="n">conv1</span>
<span class="n">conv2</span>
</code></pre></div>

<p>获得相应的层：</p>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">n</span><span class="o">.</span><span class="n">conv1</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">n</span><span class="o">.</span><span class="n">conv2</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;=========&#39;</span><span class="p">)</span>
<span class="c1"># 或者</span>
<span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">module</span> <span class="ow">in</span> <span class="n">n</span><span class="o">.</span><span class="n">named_modules</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="o">=========</span>
<span class="n">Net</span><span class="p">(</span>
  <span class="p">(</span><span class="n">conv1</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
  <span class="p">(</span><span class="n">conv2</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="p">)</span>
<span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
</code></pre></div>

<hr />
<h3 id="apply">apply<a class="headerlink" href="#apply" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">apply</span><span class="p">(</span><span class="n">fn</span><span class="p">)</span>
</code></pre></div>

<p>将 <code>fn</code> 递归应用于每个子模块（由<code>.children()</code>返回）以及自身。 典型的用法包括初始化模型的参数（参看 <a href="https://pytorch.org/docs/stable/nn.init.html#nn-init-doc">torch.nn.init</a>）。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>fn（Module -&gt; None）</em>：应用于每个子模块的函数。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">def</span> <span class="nf">init_weights</span><span class="p">(</span><span class="n">m</span><span class="p">):</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">m</span><span class="p">)</span>
    <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">m</span><span class="p">)</span> <span class="o">==</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">:</span>
        <span class="n">m</span><span class="o">.</span><span class="n">weight</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">fill_</span><span class="p">(</span><span class="mf">1.0</span><span class="p">)</span>
        <span class="nb">print</span><span class="p">(</span><span class="n">m</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>

<span class="n">net</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span>
    <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span>
    <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">net</span><span class="o">.</span><span class="n">apply</span><span class="p">(</span><span class="n">init_weights</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">Sequential</span><span class="p">(</span>
  <span class="p">(</span><span class="mi">0</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="mi">1</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">Sequential</span><span class="p">(</span>
  <span class="p">(</span><span class="mi">0</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="mi">1</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
</code></pre></div>

<hr />
<h3 id="buffers">buffers<a class="headerlink" href="#buffers" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">buffers</span><span class="p">(</span><span class="n">recurse</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>返回一个模块缓冲区的迭代器。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>recurse (bool)</em>：如果为 <code>True</code>，则产生此模块和所有子模块的缓冲区。 否则，仅产生作为该模块直接成员的缓冲区。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>torch.Tensor</em> ——模块缓冲区</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="o">&gt;&gt;&gt;</span> <span class="k">for</span> <span class="n">buf</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">buffers</span><span class="p">():</span>
<span class="o">&gt;&gt;&gt;</span>     <span class="nb">print</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">buf</span><span class="o">.</span><span class="n">data</span><span class="p">),</span> <span class="n">buf</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
<span class="o">&lt;</span><span class="k">class</span> <span class="err">&#39;</span><span class="nc">torch</span><span class="o">.</span><span class="n">FloatTensor</span><span class="s1">&#39;&gt; (20L,)</span>
<span class="o">&lt;</span><span class="k">class</span> <span class="err">&#39;</span><span class="nc">torch</span><span class="o">.</span><span class="n">FloatTensor</span><span class="s1">&#39;&gt; (20L, 1L, 5L, 5L) </span>
</code></pre></div>

<hr />
<h3 id="children">children<a class="headerlink" href="#children" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">children</span><span class="p">()</span>
</code></pre></div>

<p>返回子模块迭代器。</p>
<p><strong>返回</strong></p>
<p><em>Module</em>：子模块</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">for</span> <span class="n">module</span> <span class="ow">in</span> <span class="n">n</span><span class="o">.</span><span class="n">children</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
</code></pre></div>

<hr />
<h3 id="cpu">cpu<a class="headerlink" href="#cpu" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">cpu</span><span class="p">()</span>
</code></pre></div>

<p>将所有模型参数和缓冲区都移动到 CPU。</p>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">n</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Net</span><span class="p">(</span>                                                        
  <span class="p">(</span><span class="n">conv1</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span> 
  <span class="p">(</span><span class="n">conv2</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="p">)</span>
</code></pre></div>

<p>此时所有的模型参数和缓冲区都在 cpu。</p>
<hr />
<h3 id="cuda">cuda<a class="headerlink" href="#cuda" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">cuda</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
</code></pre></div>

<p>将所有的模型参数和缓冲区都移动到 GPU。</p>
<p>这也会使关联的参数和缓冲区成为不同的对象。因此，如果模型要在 GPU 上进行训练，应该在构建优化器之前调用该方法。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>device（python: int, optional）</em>：如果指定 <em>device</em> 的话，所有参数都会拷贝到相应的 GPU 上。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">n</span><span class="o">.</span><span class="n">cuda</span><span class="p">()</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Net</span><span class="p">(</span>                                                        
  <span class="p">(</span><span class="n">conv1</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span> 
  <span class="p">(</span><span class="n">conv2</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="p">)</span>
</code></pre></div>

<p>此时，所有的参数和缓冲区都在 GPU 上。</p>
<hr />
<h3 id="double">double<a class="headerlink" href="#double" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">double</span><span class="p">()</span>
</code></pre></div>

<p>将所有的浮点型参数和缓冲区都转化成 <code>double</code> 数据类型。</p>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">n</span><span class="o">.</span><span class="n">parameters</span><span class="p">())[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">data</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
<span class="n">n</span><span class="o">.</span><span class="n">double</span><span class="p">()</span>
<span class="n">x</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">n</span><span class="o">.</span><span class="n">parameters</span><span class="p">())[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">data</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">torch</span><span class="o">.</span><span class="n">float32</span>
<span class="n">torch</span><span class="o">.</span><span class="n">float64</span>
</code></pre></div>

<hr />
<h3 id="dump_patches">dump_patches<a class="headerlink" href="#dump_patches" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">dump_patches</span> <span class="o">=</span> <span class="kc">False</span>
</code></pre></div>

<p>该方法为 <code>load_state_dict()</code> 提供了更好的 BC 支持。在 <code>state_dict()</code> 中，版本号会保存在返回状态表（<em>state dict</em>）的属性 <code>_metadata</code> 中，因此会被限制。<code>_metadata</code> 是字典，其 <em>键</em> 遵循状态表的命名规范。有关如何加载此类信息请参见 <code>_load_from_state_dict</code> 方法。</p>
<p>如果从一个模块中添加/删除新的参数/缓冲区，则数字增加，并且模块的 <code>_load_from_state_dict</code> 方法可以对比版本号，如果状态表来自更改之前，我们可以进行适当的修改。</p>
</blockquote>
<hr />
<h3 id="eval">eval<a class="headerlink" href="#eval" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="nb">eval</span><span class="p">()</span>
</code></pre></div>

<p>设置模型为评估模式。</p>
<p>该方法只对某些特定的模块有效。了解其在训练/评估模式下的具体行为细节请参看相关模块的文档，比如 <code>Dropout</code>、<code>BatchNorm</code> 等。</p>
<p>该方法等效于 <code>self.train(False)</code>。</p>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">n</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
</code></pre></div>

<hr />
<h3 id="extra_repr">extra_repr<a class="headerlink" href="#extra_repr" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">extra_repr</span><span class="p">()</span>
</code></pre></div>

<p>设置模块的额外表示形式。（打印额外的信息）</p>
<p>要打印自定义的额外信息，你应该在自己的模块中重新实现此方法。 单行和多行字符串都是可以的。</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="s1">&#39;Anything you like!&#39;</span>

<span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
</code></pre></div>

<p>输出:</p>
<div class="codehilite"><pre><span></span><code><span class="n">Model</span><span class="p">(</span>                                                       
  <span class="n">Anything</span> <span class="n">you</span> <span class="n">like</span><span class="err">!</span>
  <span class="p">(</span><span class="n">conv1</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>  
  <span class="p">(</span><span class="n">conv2</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span> 
<span class="p">)</span>                                                            
</code></pre></div>

<hr />
<h3 id="float">float<a class="headerlink" href="#float" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="nb">float</span><span class="p">()</span>
</code></pre></div>

<p>将所有浮点型参数和缓冲区设置成 <code>float</code> 数据类型。</p>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<hr />
<h3 id="forward">forward<a class="headerlink" href="#forward" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">forward</span><span class="p">(</span><span class="o">*</span><span class="nb">input</span><span class="p">)</span>
</code></pre></div>

<p>定义每次调用时执行的计算。</p>
<p>应该被所有子类覆盖。</p>
<p><div class='note'>尽管我们在此函数中定义前向传播的操作，但是在此之后我们应该调用 <code>Module</code> 实例而不是 <code>forward</code>，因为前者负责运行所有注册的钩子（<em>registered hooks</em>），而后者会默默地忽略它们。</div></p>
</blockquote>
<hr />
<h3 id="half">half<a class="headerlink" href="#half" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">half</span><span class="p">()</span>
</code></pre></div>

<p>将所有的浮点型参数和缓冲区都转换成 <code>half</code> 数据类型。</p>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<hr />
<h3 id="load_state_dict">load_state_dict<a class="headerlink" href="#load_state_dict" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">load_state_dict</span><span class="p">(</span><span class="n">state_dict</span><span class="p">,</span> <span class="n">strict</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>将参数和缓冲区从 <code>state_dict</code> 中拷贝到此模块以及子类中。如果 <code>strict=True</code>，则 <code>state_dict()</code> 中的 <em>键</em> 必须与该模块的<code>state_dict()</code> 函数返回的 <em>键</em> 完全匹配。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>state_dict（dict）</em>：包含了参数和持久缓冲区的字典。</li>
<li><em>strict</em>：是否严格强制 <code>state_dict</code> 的 <em>键</em> 与 模块<code>state_dict()</code> 函数返回的 <em>键</em> 保持一致。</li>
</ul>
<p><strong>返回</strong></p>
<ul>
<li><em>missing_keys</em>：一个包含了丢失 <em>键</em>  的列表。</li>
<li><em>unexpected_keys</em>：一个包含了所有非期望的 <em>键</em>  的列表</li>
</ul>
<p><strong>返回类型</strong></p>
<p>带有 <code>missing_keys</code> 和 <code>unexpected_keys</code> 字段的 <code>NamedTuple</code>。</p>
</blockquote>
<p>这是 PyTorch 加载预训练模型的一种方式。</p>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="c1"># 定义网络</span>
<span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>

<span class="c1"># 初始化模型</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>

<span class="c1"># 初始化优化器</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">optim</span><span class="o">.</span><span class="n">SGD</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>

<span class="c1"># 初始化损失函数</span>
<span class="n">criterion</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">CrossEntropyLoss</span><span class="p">()</span>

<span class="c1"># 训练模型</span>
<span class="k">for</span> <span class="n">epoch</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">2</span><span class="p">):</span>
    <span class="n">running_loss</span> <span class="o">=</span> <span class="mf">0.0</span>
    <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">data</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">trainloader</span><span class="p">,</span> <span class="mi">0</span><span class="p">):</span>
        <span class="n">inputs</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">data</span>  
        <span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span> 
        <span class="n">outputs</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>
        <span class="n">loss</span> <span class="o">=</span> <span class="n">criterion</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>
        <span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
        <span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>

<span class="c1"># 保存模型</span>
<span class="n">torch</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">state_dict</span><span class="p">(),</span> <span class="n">PATH</span><span class="p">)</span>

<span class="c1"># 加载刚刚保存的模型</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>
<span class="n">model</span><span class="o">.</span><span class="n">load_state_dict</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">PATH</span><span class="p">))</span>
</code></pre></div>

<hr />
<h3 id="modules">modules<a class="headerlink" href="#modules" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">modules</span><span class="p">()</span>
</code></pre></div>

<p>返回网络中所有模块的迭代器。</p>
<p><strong>返回</strong></p>
<p><em>Module</em>：神经网络模块。</p>
<p><div class='note'>重复的模块只返回一次。下面的示例中 <code>l</code> 只返回一次</div></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">l</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="n">net</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span><span class="n">l</span><span class="p">,</span> <span class="n">l</span><span class="p">)</span>
<span class="k">for</span> <span class="n">idx</span><span class="p">,</span> <span class="n">m</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">modules</span><span class="p">()):</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">idx</span><span class="p">,</span> <span class="s1">&#39;-&gt;&#39;</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="mi">0</span> <span class="o">-&gt;</span> <span class="n">Sequential</span><span class="p">(</span>
  <span class="p">(</span><span class="mi">0</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="mi">1</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="mi">1</span> <span class="o">-&gt;</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<hr />
<h3 id="named_buffers">named_buffers<a class="headerlink" href="#named_buffers" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">named_buffers</span><span class="p">(</span><span class="n">prefix</span><span class="o">=</span><span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="n">recurse</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>返回模块缓冲区的迭代器，同时产生缓冲区的名称和缓冲区本身。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>prefix（str）</em>：预置所有缓冲区名称的前缀。</li>
<li><em>recurse（bool）</em>：如果为 <code>True</code>，则生成该模块以及所有子模块的缓冲区。否则只生成该模块的直接成员缓冲区。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>（string, torch.Tensor）</em>：包含名称和缓冲区的元组。</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">MyModel</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MyModel</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="n">buffer</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>  <span class="c1"># tensor</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">register_buffer</span><span class="p">(</span><span class="s1">&#39;my_buffer&#39;</span><span class="p">,</span> <span class="n">buffer</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">pass</span>

<span class="n">model</span> <span class="o">=</span> <span class="n">MyModel</span><span class="p">()</span>
<span class="k">for</span> <span class="n">buffer</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">named_buffers</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">buffer</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="p">(</span><span class="s1">&#39;my_buffer&#39;</span><span class="p">,</span> <span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.9004</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1200</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.4183</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">1.0403</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.6231</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.7580</span><span class="p">]]))</span>
</code></pre></div>

<hr />
<h3 id="named_children">named_children<a class="headerlink" href="#named_children" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">named_children</span><span class="p">()</span>
</code></pre></div>

<p>返回当前模块的子模块迭代器，同时产生子模块名称和子模块自身。</p>
<p><strong>返回</strong></p>
<p><em>（string, Module）</em>：包含名称和模块的元组。</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">MyModel</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MyModel</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;conv2&#39;</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">pass</span>

<span class="n">model</span> <span class="o">=</span> <span class="n">MyModel</span><span class="p">()</span>
<span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">named_children</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">m</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="p">(</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">)))</span>
<span class="p">(</span><span class="s1">&#39;conv2&#39;</span><span class="p">,</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">)))</span>
</code></pre></div>

<hr />
<h3 id="named_modules">named_modules<a class="headerlink" href="#named_modules" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">named_modules</span><span class="p">(</span><span class="n">memo</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">prefix</span><span class="o">=</span><span class="s1">&#39;&#39;</span><span class="p">)</span>
</code></pre></div>

<p>返回网络中所有模块的迭代器，产生模块名和模块本身。</p>
<p><strong>返回</strong></p>
<p><em>（string, Module）</em>：包含名称和模块的元组。</p>
<p><div class='note'>重复的模块只返回一次，下面的示例中 <code>l</code> 只返回一次。</div></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">l</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="n">net</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">(</span><span class="n">l</span><span class="p">,</span> <span class="n">l</span><span class="p">)</span>
<span class="k">for</span> <span class="n">idx</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">named_modules</span><span class="p">()):</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">idx</span><span class="p">,</span> <span class="s1">&#39;-&gt;&#39;</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="mi">0</span> <span class="o">-&gt;</span> <span class="p">(</span><span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="n">Sequential</span><span class="p">(</span>
  <span class="p">(</span><span class="mi">0</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="mi">1</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">))</span>
<span class="mi">1</span> <span class="o">-&gt;</span> <span class="p">(</span><span class="s1">&#39;0&#39;</span><span class="p">,</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</code></pre></div>

<hr />
<h3 id="named_parameters">named_parameters<a class="headerlink" href="#named_parameters" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">named_parameters</span><span class="p">(</span><span class="n">prefix</span><span class="o">=</span><span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="n">recurse</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>返回模块参数的迭代器，同时产生参数名称和参数本身。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>prefix（str）</em>：添加到所有参数名称前面的前缀；</li>
<li><em>recurse（bool）</em>：如果为 <code>True</code>，则产生该模块和所有子模块的参数。 否则，仅产生作为该模块直接的成员参数。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>（string, Parameter）</em>：包含名称和参数的元组。</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">MyModel</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MyModel</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;conv2&#39;</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">20</span><span class="p">,</span> <span class="mi">5</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">pass</span>

<span class="n">model</span> <span class="o">=</span> <span class="n">MyModel</span><span class="p">()</span>
<span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">named_parameters</span><span class="p">():</span>
    <span class="k">if</span> <span class="n">m</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;bias&#39;</span><span class="p">):</span>
        <span class="nb">print</span><span class="p">(</span><span class="n">m</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="p">(</span><span class="s1">&#39;conv1.bias&#39;</span><span class="p">,</span> <span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([</span> <span class="mf">0.1529</span><span class="p">,</span>  <span class="mf">0.0817</span><span class="p">,</span>  <span class="mf">0.0527</span><span class="p">,</span>  <span class="mf">0.0079</span><span class="p">,</span>  <span class="mf">0.1716</span><span class="p">,</span>  <span class="mf">0.0699</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0335</span><span class="p">,</span>  <span class="mf">0.0208</span><span class="p">,</span>
         <span class="mf">0.1526</span><span class="p">,</span>  <span class="mf">0.0377</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0416</span><span class="p">,</span>  <span class="mf">0.1249</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1724</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0924</span><span class="p">,</span>  <span class="mf">0.0950</span><span class="p">,</span>  <span class="mf">0.1820</span><span class="p">,</span>
        <span class="o">-</span><span class="mf">0.0209</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.1787</span><span class="p">,</span>  <span class="mf">0.0364</span><span class="p">,</span>  <span class="mf">0.0478</span><span class="p">],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
<span class="p">(</span><span class="s1">&#39;conv2.bias&#39;</span><span class="p">,</span> <span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([</span> <span class="mf">0.0359</span><span class="p">,</span>  <span class="mf">0.0023</span><span class="p">,</span>  <span class="mf">0.0362</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0432</span><span class="p">,</span>  <span class="mf">0.0150</span><span class="p">,</span>  <span class="mf">0.0420</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0412</span><span class="p">,</span>  <span class="mf">0.0309</span><span class="p">,</span>
        <span class="o">-</span><span class="mf">0.0110</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0417</span><span class="p">,</span>  <span class="mf">0.0128</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0324</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0230</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0350</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0379</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0256</span><span class="p">,</span>
        <span class="o">-</span><span class="mf">0.0242</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0324</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0182</span><span class="p">,</span>  <span class="mf">0.0040</span><span class="p">],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
</code></pre></div>

<hr />
<h3 id="parameters">parameters<a class="headerlink" href="#parameters" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">parameters</span><span class="p">(</span><span class="n">recurse</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>返回模块参数迭代器。</p>
<p>通常用来传递给优化器。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>recurse（bool）</em>：如果是 <code>True</code>，则返回该模块及子模块的所有参数，否则只返回当前模块的参数。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>Parameter</em>：模块参数。</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">for</span> <span class="n">param</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">parameters</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">param</span><span class="o">.</span><span class="n">data</span><span class="p">),</span> <span class="n">param</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="o">&lt;</span><span class="k">class</span> <span class="err">&#39;</span><span class="nc">torch</span><span class="o">.</span><span class="n">FloatTensor</span><span class="s1">&#39;&gt; (20L,)</span>
<span class="o">&lt;</span><span class="k">class</span> <span class="err">&#39;</span><span class="nc">torch</span><span class="o">.</span><span class="n">FloatTensor</span><span class="s1">&#39;&gt; (20L, 1L, 5L, 5L)</span>
</code></pre></div>

<hr />
<h3 id="register_backward_hook">register_backward_hook<a class="headerlink" href="#register_backward_hook" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">register_backward_hook</span><span class="p">(</span><span class="n">hook</span><span class="p">)</span>
</code></pre></div>

<p>在模块上注册后向传播钩子。</p>
<p>每次计算模块输入的梯度时，都会调用该钩子。 钩子应具有以下签名：</p>
<div class="codehilite"><pre><span></span><code>hook(module, grad_input, grad_output) -&gt; Tensor or None
</code></pre></div>

<p>如果模块具有多个输入或输出，则 <code>grad_input</code> 和 <code>grad_output</code> 可能是元组。 钩子不应该修改其参数，但可以选择相对于输入返回新的梯度，该梯度将在后续计算中代替 <code>grad_input</code> 使用。</p>
<p><strong>返回</strong></p>
<p>可以通过调用 <code>handle.remove()</code> 来删除添加的钩子的句柄。</p>
<p><strong>返回类型</strong></p>
<p><code>torch.utils.hooks.RemovableHandle</code></p>
</blockquote>
<div class='warning'>对于执行许多操作的复杂 <code>Module</code>，当前实现不具有所呈现的行为。 在某些故障情况下，<code>grad_input</code> 和 <code>grad_output</code> 将仅包含输入和输出的子集的梯度。 对于此类 <code>Module</code>，应在特定的输入或输出上直接使用 <code>torch.Tensor.register_hook()</code> 以获取所需的梯度。</div>

<blockquote>
<p><code>register_backward_hook</code> 作用是获取神经网络反向传播过程中，各个模块输入端和输出端的梯度值。对于模块，其使用方式为：<code>module.register_backward_hook(hook_fn)</code> 。</p>
<p>它的输入变量分别为：模块，模块输入端的梯度，模块输出端的梯度。需要注意的是，这里的输入端和输出端，是站在前向传播的角度的，而不是反向传播的角度。例如线性模块：<script type="math/tex">o=W \times x+b</script>，其输入端为 <script type="math/tex">W，x，b</script>，输出端为 <script type="math/tex">o</script>。</p>
<p>如果模块有多个输入或者输出的话，<code>grad_input</code>和<code>grad_output</code>可以是元组类型。对于线性模块：<script type="math/tex">o=W \times x+b</script> ，它的输入端包括了<script type="math/tex">W，x，b</script>三部分，因此 <code>grad_input</code> 就是一个包含三个元素的 <em>tuple</em>。</p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>


<span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">relu1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">ReLU</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">initialize</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">initialize</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span>
                <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">3.</span><span class="p">],</span>
                              <span class="p">[</span><span class="o">-</span><span class="mf">4.</span><span class="p">,</span> <span class="o">-</span><span class="mf">5.</span><span class="p">,</span> <span class="o">-</span><span class="mf">6.</span><span class="p">],</span>
                              <span class="p">[</span><span class="mf">7.</span><span class="p">,</span> <span class="mf">8.</span><span class="p">,</span> <span class="mf">9.</span><span class="p">],</span>
                              <span class="p">[</span><span class="o">-</span><span class="mf">10.</span><span class="p">,</span> <span class="o">-</span><span class="mf">11.</span><span class="p">,</span> <span class="o">-</span><span class="mf">12.</span><span class="p">]]))</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">,</span> <span class="mf">3.0</span><span class="p">,</span> <span class="mf">4.0</span><span class="p">]))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([[</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">,</span> <span class="mf">3.0</span><span class="p">,</span> <span class="mf">4.0</span><span class="p">]]))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([</span><span class="mf">1.0</span><span class="p">]))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">o</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="n">o</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">relu1</span><span class="p">(</span><span class="n">o</span><span class="p">)</span>
        <span class="n">o</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="p">(</span><span class="n">o</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">o</span>


<span class="n">total_grad_out</span> <span class="o">=</span> <span class="p">[]</span>
<span class="n">total_grad_in</span> <span class="o">=</span> <span class="p">[]</span>


<span class="k">def</span> <span class="nf">hook_fn_backward</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">grad_input</span><span class="p">,</span> <span class="n">grad_output</span><span class="p">):</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">module</span><span class="p">)</span> <span class="c1"># 为了区分模块</span>
    <span class="c1"># 为了符合反向传播的顺序，我们先打印 grad_output</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;grad_output&#39;</span><span class="p">,</span> <span class="n">grad_output</span><span class="p">)</span> 
    <span class="c1"># 再打印 grad_input</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;grad_input&#39;</span><span class="p">,</span> <span class="n">grad_input</span><span class="p">)</span>
    <span class="c1"># 保存到全局变量</span>
    <span class="n">total_grad_in</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">grad_input</span><span class="p">)</span>
    <span class="n">total_grad_out</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">grad_output</span><span class="p">)</span>


<span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>

<span class="n">modules</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">named_children</span><span class="p">()</span>
<span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">module</span> <span class="ow">in</span> <span class="n">modules</span><span class="p">:</span>
    <span class="n">module</span><span class="o">.</span><span class="n">register_backward_hook</span><span class="p">(</span><span class="n">hook_fn_backward</span><span class="p">)</span>

<span class="c1"># 这里的 requires_grad 很重要，如果不加，backward hook</span>
<span class="c1"># 执行到第一层，对 x 的导数将为 None</span>
<span class="c1"># 此外再强调一遍 x 的维度，一定不能写成 torch.Tensor([1.0, 1.0, 1.0]).requires_grad_()</span>
<span class="c1"># 否则 backward hook 会出问题。</span>
<span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([[</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">]])</span><span class="o">.</span><span class="n">requires_grad_</span><span class="p">()</span>
<span class="n">o</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="n">o</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>

<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;==========Saved inputs and outputs==========&#39;</span><span class="p">)</span>
<span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">total_grad_in</span><span class="p">)):</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;grad output: &#39;</span><span class="p">,</span> <span class="n">total_grad_out</span><span class="p">[</span><span class="n">idx</span><span class="p">])</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;grad input: &#39;</span><span class="p">,</span> <span class="n">total_grad_in</span><span class="p">[</span><span class="n">idx</span><span class="p">])</span>
</code></pre></div>

<div class='warning'><code>register_backward_hook</code>只能操作简单模块，而不能操作包含多个子模块的复杂模块。 如果对复杂模块用了 <code>backward hook</code>，那么我们只能得到该模块最后一次简单操作的梯度信息。对于上面的代码稍作修改，不再遍历各个子模块，而是把 model 整体绑在一个 <code>hook_fn_backward</code>上：</div>

<div class="codehilite"><pre><span></span><code><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>
<span class="n">model</span><span class="o">.</span><span class="n">register_backward_hook</span><span class="p">(</span><span class="n">hook_fn_backward</span><span class="p">)</span> 
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Model</span><span class="p">(</span>
  <span class="p">(</span><span class="n">fc1</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="n">relu1</span><span class="p">):</span> <span class="n">ReLU</span><span class="p">()</span>
  <span class="p">(</span><span class="n">fc2</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">grad_output</span> <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">]]),)</span>
<span class="n">grad_input</span> <span class="p">(</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">]),</span> <span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">3.</span><span class="p">,</span> <span class="mf">4.</span><span class="p">]]),</span> <span class="n">tensor</span><span class="p">([[</span> <span class="mf">7.</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">27.</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.</span><span class="p">]]))</span>
<span class="o">==========</span><span class="n">Saved</span> <span class="n">inputs</span> <span class="ow">and</span> <span class="n">outputs</span><span class="o">==========</span>
<span class="n">grad</span> <span class="n">output</span><span class="p">:</span>  <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">]]),)</span>
<span class="n">grad</span> <span class="nb">input</span><span class="p">:</span>  <span class="p">(</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">]),</span> <span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">3.</span><span class="p">,</span> <span class="mf">4.</span><span class="p">]]),</span> <span class="n">tensor</span><span class="p">([[</span> <span class="mf">7.</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">27.</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.</span><span class="p">]]))</span>
</code></pre></div>

<p>我们发现，程序只输出了 <em>fc2</em> 的梯度信息。</p>
<p>除此之外，有人还总结了 <em>backward hook</em> 在全连接层和卷积层表现不一致的地方（<a href="https://link.zhihu.com/?target=https%3A//github.com/pytorch/pytorch/issues/12331">Feedback about PyTorch register_backward_hook · Issue #12331 · pytorch/pytorch</a>）</p>
<blockquote>
<ul>
<li>形状</li>
</ul>
<p>① 在卷积层中，<em>weight</em> 的梯度和 <em>weight</em> 的形状相同</p>
<p>② 在全连接层中，<em>weight</em> 的梯度的形状是 <em>weight</em> 形状的转置（观察上文中代码的输出可以验证）</p>
<ul>
<li><code>grad_input</code> 元组中各梯度的顺序</li>
</ul>
<p>① 在卷积层中，<em>bias</em> 的梯度位于元组的末尾：<code>grad_input</code> = (对 <em>feature</em> 的导数，对权重 <em>W</em> 的导数，对 <em>bias</em> 的导数)</p>
<p>② 在全连接层中，<em>bias</em> 的梯度位于元组的开头：<code>grad_input</code>=(对 <em>bias</em> 的导数，对 <em>feature</em> 的导数，对 <em>W</em> 的导数)</p>
<ul>
<li>当 <em>batchsize</em> &gt; 1 时，对 <em>bias</em> 的梯度处理不同</li>
</ul>
<p>① 在卷积层，对  <em>bias</em>  的梯度为整个 <em>batch</em> 的数据在 <em>bias</em> 上的梯度之和：<code>grad_input</code> = (对 <em>feature</em> 的导数，对权重 <em>W</em> 的导数，对 <em>bias</em> 的导数)</p>
<p>② 在全连接层，对 bias 的梯度是分开的，<em>batch</em> 中每条数据，对应一个 <em>bias</em> 的梯度：<code>grad_input</code> = ((<em>data1</em> 对 <em>bias</em> 的导数，<em>data2</em> 对 <em>bias</em> 的导数 ...)，对 <em>feature</em> 的导数，对 <em>W</em> 的导数)</p>
</blockquote>
<hr />
<h3 id="regitster_buffer">register_buffer<a class="headerlink" href="#regitster_buffer" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">register_buffer</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">tensor</span><span class="p">)</span>
</code></pre></div>

<p>向模块中添加持久性缓冲区。</p>
<p>该方法通常用于注册被视为非模型参数的缓冲区。例如，<code>BatchNorm</code> 的 <code>running_mean</code> 不是参数，而是持久状态的一部分。</p>
<p>可以通过缓冲区的给定名称进行访问。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>name（str）</em>：缓冲区名称。可以使用给定名称访问模块中的缓冲区。</li>
<li><em>tensor（Tensor）</em>：需要注册的缓冲区。</li>
</ul>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="bp">self</span><span class="o">.</span><span class="n">register_buffer</span><span class="p">(</span><span class="s1">&#39;running_mean&#39;</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">num_features</span><span class="p">))</span>
</code></pre></div>

<hr />
<h3 id="register_forward_hook">register_forward_hook<a class="headerlink" href="#register_forward_hook" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">register_forward_hook</span><span class="p">(</span><span class="n">hook</span><span class="p">)</span>
</code></pre></div>

<p>在模块上注册前向传播钩子。</p>
<p>每次使用 <code>forward()</code> 计算输出以后都会调用该方法。它应该具有以下签名：</p>
<p><code>hook(module, inputs, outputs) -&gt; None or modified output</code></p>
<p>这个钩子可以修改输出。它可以原地修改输入，但不会影响前向传播，因为在调用 <code>forward()</code> 之后才会调用它。</p>
<p><strong>返回</strong></p>
<p>可以通过调用 <code>handle.remove()</code> 来删除所添加的钩子的句柄*（handle）*。</p>
<p><strong>返回类型</strong></p>
<p><code>torch.utils.hooks.RemovableHandle</code></p>
</blockquote>
<p>借助这个 <em>hook</em>，我们可以方便地使用预训练的神经网络提取特征，而不用改变预训练网络的结构。</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>

<span class="c1"># 首先我们定义一个模型</span>
<span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">relu1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">ReLU</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">initialize</span><span class="p">()</span>

    <span class="c1"># 为了方便验证，我们将指定特殊的weight和bias</span>
    <span class="k">def</span> <span class="nf">initialize</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span>
                <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">3.</span><span class="p">],</span>
                              <span class="p">[</span><span class="o">-</span><span class="mf">4.</span><span class="p">,</span> <span class="o">-</span><span class="mf">5.</span><span class="p">,</span> <span class="o">-</span><span class="mf">6.</span><span class="p">],</span>
                              <span class="p">[</span><span class="mf">7.</span><span class="p">,</span> <span class="mf">8.</span><span class="p">,</span> <span class="mf">9.</span><span class="p">],</span>
                              <span class="p">[</span><span class="o">-</span><span class="mf">10.</span><span class="p">,</span> <span class="o">-</span><span class="mf">11.</span><span class="p">,</span> <span class="o">-</span><span class="mf">12.</span><span class="p">]]))</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">,</span> <span class="mf">3.0</span><span class="p">,</span> <span class="mf">4.0</span><span class="p">]))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([[</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">,</span> <span class="mf">3.0</span><span class="p">,</span> <span class="mf">4.0</span><span class="p">]]))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([</span><span class="mf">1.0</span><span class="p">]))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">o</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="n">o</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">relu1</span><span class="p">(</span><span class="n">o</span><span class="p">)</span>
        <span class="n">o</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="p">(</span><span class="n">o</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">o</span>

<span class="c1"># 全局变量，用于存储中间层的 feature</span>
<span class="n">total_feat_out</span> <span class="o">=</span> <span class="p">[]</span>
<span class="n">total_feat_in</span> <span class="o">=</span> <span class="p">[]</span>

<span class="c1"># 定义 forward hook function</span>
<span class="k">def</span> <span class="nf">hook_fn_forward</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="nb">input</span><span class="p">,</span> <span class="n">output</span><span class="p">):</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">module</span><span class="p">)</span> <span class="c1"># 用于区分模块</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;input&#39;</span><span class="p">,</span> <span class="nb">input</span><span class="p">)</span> <span class="c1"># 首先打印出来</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;output&#39;</span><span class="p">,</span> <span class="n">output</span><span class="p">)</span>
    <span class="n">total_feat_out</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">output</span><span class="p">)</span> <span class="c1"># 然后分别存入全局 list 中</span>
    <span class="n">total_feat_in</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>


<span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>

<span class="n">modules</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">named_children</span><span class="p">()</span> <span class="c1"># 获取模型的所有直接子模块（name, module）</span>
<span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">module</span> <span class="ow">in</span> <span class="n">modules</span><span class="p">:</span>
    <span class="n">module</span><span class="o">.</span><span class="n">register_forward_hook</span><span class="p">(</span><span class="n">hook_fn_forward</span><span class="p">)</span>

<span class="c1"># 注意下面代码中 x 的维度，对于linear module，输入一定是大于等于二维的</span>
<span class="c1"># （第一维是 batch size）。在 forward hook 中看不出来，但是 backward hook 中，</span>
<span class="c1"># 得到的梯度完全不对。</span>
<span class="c1"># 有一篇 hook 的教程就是这里出了错，作者还强行解释</span>

<span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">([[</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">]])</span><span class="o">.</span><span class="n">requires_grad_</span><span class="p">()</span> 
<span class="n">o</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="n">o</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>

<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;==========Saved inputs and outputs==========&#39;</span><span class="p">)</span>
<span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">total_feat_in</span><span class="p">)):</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;input: &#39;</span><span class="p">,</span> <span class="n">total_feat_in</span><span class="p">[</span><span class="n">idx</span><span class="p">])</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;output: &#39;</span><span class="p">,</span> <span class="n">total_feat_out</span><span class="p">[</span><span class="n">idx</span><span class="p">])</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">3</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="nb">input</span> <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">),)</span>
<span class="n">output</span> <span class="n">tensor</span><span class="p">([[</span>  <span class="mf">7.</span><span class="p">,</span> <span class="o">-</span><span class="mf">13.</span><span class="p">,</span>  <span class="mf">27.</span><span class="p">,</span> <span class="o">-</span><span class="mf">29.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">)</span>
<span class="n">ReLU</span><span class="p">()</span>
<span class="nb">input</span> <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span>  <span class="mf">7.</span><span class="p">,</span> <span class="o">-</span><span class="mf">13.</span><span class="p">,</span>  <span class="mf">27.</span><span class="p">,</span> <span class="o">-</span><span class="mf">29.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">),)</span>
<span class="n">output</span> <span class="n">tensor</span><span class="p">([[</span> <span class="mf">7.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">,</span> <span class="mf">27.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">ReluBackward0</span><span class="o">&gt;</span><span class="p">)</span>
<span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="nb">input</span> <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span> <span class="mf">7.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">,</span> <span class="mf">27.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">ReluBackward0</span><span class="o">&gt;</span><span class="p">),)</span>
<span class="n">output</span> <span class="n">tensor</span><span class="p">([[</span><span class="mf">89.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">)</span>
<span class="o">==========</span><span class="n">Saved</span> <span class="n">inputs</span> <span class="ow">and</span> <span class="n">outputs</span><span class="o">==========</span>
<span class="nb">input</span><span class="p">:</span>  <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">),)</span>
<span class="n">output</span><span class="p">:</span>  <span class="n">tensor</span><span class="p">([[</span>  <span class="mf">7.</span><span class="p">,</span> <span class="o">-</span><span class="mf">13.</span><span class="p">,</span>  <span class="mf">27.</span><span class="p">,</span> <span class="o">-</span><span class="mf">29.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">)</span>
<span class="nb">input</span><span class="p">:</span>  <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span>  <span class="mf">7.</span><span class="p">,</span> <span class="o">-</span><span class="mf">13.</span><span class="p">,</span>  <span class="mf">27.</span><span class="p">,</span> <span class="o">-</span><span class="mf">29.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">),)</span>
<span class="n">output</span><span class="p">:</span>  <span class="n">tensor</span><span class="p">([[</span> <span class="mf">7.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">,</span> <span class="mf">27.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">ReluBackward0</span><span class="o">&gt;</span><span class="p">)</span>
<span class="nb">input</span><span class="p">:</span>  <span class="p">(</span><span class="n">tensor</span><span class="p">([[</span> <span class="mf">7.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">,</span> <span class="mf">27.</span><span class="p">,</span>  <span class="mf">0.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">ReluBackward0</span><span class="o">&gt;</span><span class="p">),)</span>
<span class="n">output</span><span class="p">:</span>  <span class="n">tensor</span><span class="p">([[</span><span class="mf">89.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">)</span>
</code></pre></div>

<hr />
<h3 id="register_forward_pre_hook">register_forward_pre_hook<a class="headerlink" href="#register_forward_pre_hook" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">register_forward_pre_hook</span><span class="p">(</span><span class="n">hook</span><span class="p">)</span>
</code></pre></div>

<p>在模块上注册一个前向传播的 <em>pre-hook</em>。</p>
<p>每次调用 <code>forward()</code> 之前都会调用该钩子。它应该具有以下签名：</p>
<p><code>hook(module, input) -&gt; None or modified input</code></p>
<p>钩子可以修改输入。用户可以在钩子中返回一个元组或一个修改后的值。如果返回单个值，则将该值包装成一个元组（除非该值本身已经是一个元组）。</p>
<p><strong>返回</strong></p>
<p>可以通过调用 <code>handle.remove()</code> 来移除添加的钩子的句柄。</p>
<p><strong>返回类型</strong></p>
<p><code>torch.utils.hooks.RemovableHandle</code></p>
</blockquote>
<hr />
<h3 id="register_parameter">register_parameter<a class="headerlink" href="#register_parameter" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">register_parameter</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">param</span><span class="p">)</span>
</code></pre></div>

<p>给模块添加参数。</p>
<p>可以通过给定名称作为属性访问参数。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>name（string）</em>：参数名。可以通过给定名称从模块中获得参数。</li>
<li><em>param（Parameter）</em>：需要添加到模块的参数。</li>
</ul>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="n">weight</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s1">&#39;add_weight&#39;</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">pass</span>
</code></pre></div>

<p>上面的例子中，我们不能将 <code>weight</code> 作为属性直接访问，但可以将 <code>add_weight</code> 作为属性直接访问。</p>
<div class="codehilite"><pre><span></span><code><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;==========&#39;</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">model</span><span class="o">.</span><span class="n">add_weight</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="ne">AttributeError</span><span class="p">:</span> <span class="s1">&#39;Model&#39;</span> <span class="nb">object</span> <span class="n">has</span> <span class="n">no</span> <span class="n">attribute</span> <span class="s1">&#39;weight&#39;</span>
<span class="o">==========</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.0621</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.9504</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.2510</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.5620</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<hr />
<h3 id="requires_grad_">requires_grad_<a class="headerlink" href="#requires_grad_" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">requires_grad_</span><span class="p">(</span><span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>改变 <code>autograd</code> 是否要记录当前模块中参数的操作。</p>
<p>该方法原地设置参数的 <code>requires_grad</code> 属性。</p>
<p>此方法有助于冻结模块的一部分以便分别单独微调或训练模型的各个部分（例如 GAN 训练）。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>requires_grad（bool）</em>：<code>autograd</code> 是否应该记录该模块上参数的操作。默认为 <code>True</code>。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<hr />
<h3 id="state_dict">state_dict<a class="headerlink" href="#state_dict" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">state_dict</span><span class="p">(</span><span class="n">destination</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">prefix</span><span class="o">=</span><span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="n">keep_vars</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</code></pre></div>

<p>返回包含整个模块状态的字典。</p>
<p>参数和持久缓冲区（比如 <em>running averages</em>）都包括在内。<em>键</em> 是对应的参数和缓冲区的名称。</p>
<p><strong>返回</strong></p>
<p>包含整个模块状态的字典。</p>
<p><strong>返回类型</strong></p>
<p><em>dict</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">module</span><span class="o">.</span><span class="n">state_dict</span><span class="p">()</span><span class="o">.</span><span class="n">keys</span><span class="p">()</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="p">[</span><span class="s1">&#39;bias&#39;</span><span class="p">,</span> <span class="s1">&#39;weight&#39;</span><span class="p">]</span>
</code></pre></div>

<hr />
<h3 id="to">to<a class="headerlink" href="#to" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">to</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
</code></pre></div>

<p>移动参数和缓冲区。</p>
<p>该方法可以通过以下方式调用：</p>
<div class="codehilite"><pre><span></span><code><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">non_blocking</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</code></pre></div>

<div class="codehilite"><pre><span></span><code><span class="n">to</span><span class="p">(</span><span class="n">dtype</span><span class="p">,</span> <span class="n">non_blocking</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</code></pre></div>

<div class="codehilite"><pre><span></span><code><span class="n">to</span><span class="p">(</span><span class="n">tensor</span><span class="p">,</span> <span class="n">non_blocking</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</code></pre></div>

<p>它的签名类似于 <code>torch.Tensor.to()</code>，但 <code>dtype</code> 只接受浮点类型，且该方法只会把浮点型的参数和缓冲区转换成 <code>dtype</code>。如果给定了 <code>device</code>，则所有参数和缓冲区都会被移动到该设备上，但数据类型保持不变。当设置了 <code>non_blocking</code> 时，如果可能，它会尝试相对于主机进行异步转换/移动，例如，将具有固定内存（pinned memory）的 CPU 张量移动到 CUDA 设备。</p>
<p>见下面的例子。</p>
<div class="note">该方法会原地修改模块。</div>
<p><strong>参数</strong></p>
<ul>
<li><em>device（torch.device）</em>：参数和缓冲区要移动到的设备。</li>
<li><em>dtype（torch.dtype）</em>：模块中浮点参数和缓冲区所需要的浮点类型。</li>
<li><em>tensor（torch.Tensor）</em>：满足该模块所有参数和缓冲区所需 <code>dtype</code> 和 <code>device</code> 的张量。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">weight</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.1913</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.3420</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">0.5113</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.2325</span><span class="p">]])</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">)</span>
<span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">weight</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.1913</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.3420</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">0.5113</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.2325</span><span class="p">]],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">gpu1</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">&quot;cuda:1&quot;</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">gpu1</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">half</span><span class="p">,</span> <span class="n">non_blocking</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">weight</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.1914</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.3420</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">0.5112</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.2324</span><span class="p">]],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="s1">&#39;cuda:1&#39;</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">cpu</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">&quot;cpu&quot;</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">cpu</span><span class="p">)</span>
<span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="o">&gt;&gt;&gt;</span> <span class="n">linear</span><span class="o">.</span><span class="n">weight</span>
<span class="n">Parameter</span> <span class="n">containing</span><span class="p">:</span>
<span class="n">tensor</span><span class="p">([[</span> <span class="mf">0.1914</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.3420</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">0.5112</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.2324</span><span class="p">]],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float16</span><span class="p">)</span>
</code></pre></div>

<hr />
<h3 id="train">train<a class="headerlink" href="#train" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">train</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>设置模块的训练模式。</p>
<p>这只对某些特定模块有影响。如果模块受其影响（例如 <code>Dropout</code>、<code>BatchNorm</code> 等），其在训练/评估模式下的具体行为请参阅相应模块的文档。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>mode（bool）</em>：设置训练模式（<code>True</code>）或者评估模式（<code>False</code>）。默认为 <code>True</code>。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<p><strong>示例</strong></p>
<div class="codehilite"><pre><span></span><code><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">()</span>
</code></pre></div>

<hr />
<h3 id="type">type<a class="headerlink" href="#type" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="nb">type</span><span class="p">(</span><span class="n">dst_type</span><span class="p">)</span>
</code></pre></div>

<p>将所有参数和缓冲区都设置成 <code>dst_type</code>。</p>
<p><strong>参数</strong></p>
<ul>
<li><em>dst_type（python:type or string）</em>：所需类型。</li>
</ul>
<p><strong>返回</strong></p>
<p><em>self</em></p>
<p><strong>返回类型</strong></p>
<p><em>Module</em></p>
</blockquote>
<hr />
<h3 id="zero_grad">zero_grad<a class="headerlink" href="#zero_grad" title="Permanent link">&para;</a></h3>
<blockquote>
<div class="codehilite"><pre><span></span><code><span class="n">zero_grad</span><span class="p">()</span>
</code></pre></div>

<p>设置所有模型参数的梯度为 0。</p>
</blockquote>
              
            </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../sequential/" class="btn btn-neutral float-right" title="Sequential">Next <span class="icon icon-circle-arrow-right"></span></a>
      
      
        <a href="../parameters/" class="btn btn-neutral" title="参数 Parameters"><span class="icon icon-circle-arrow-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <!-- Copyright etc -->
    
      <p>©2020 Rogerspy. All rights reserved.</p>
    
  </div>

  Built with <a href="https://www.mkdocs.org/">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
      
        </div>
      </div>

    </section>

  </div>

  <div class="rst-versions" role="note" aria-label="versions">
    <span class="rst-current-version" data-toggle="rst-current-version">
      
      
        <span><a href="../parameters/" style="color: #fcfcfc;">&laquo; Previous</a></span>
      
      
        <span style="margin-left: 15px"><a href="../sequential/" style="color: #fcfcfc">Next &raquo;</a></span>
      
    </span>
</div>
    <script>var base_url = '../..';</script>
    <script src="../../js/theme.js" defer></script>
      <script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML" defer></script>
      <script src="../../search/main.js" defer></script>
    <script defer>
        window.onload = function () {
            SphinxRtdTheme.Navigation.enable(true);
        };
    </script>

</body>
</html>
