<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <meta name="author" content="Rogerspy">
  <link rel="canonical" href="https://pytorch-zh.gitee.io/get-started/">
  <link rel="shortcut icon" href="/pytorch-zh/img/favicon.ico">
  <title>60分钟快速入门 - Pytorch 中文文档（1.4.0）</title>
  <link rel="preconnect" href="https://fonts.googleapis.com">
  <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700&amp;display=swap" />

  <link rel="stylesheet" href="../css/theme.css" />
  <link rel="stylesheet" href="../css/theme_extra.css" />
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css" />
  
  <script>
    // MkDocs page metadata, consumed by the theme's JS (search results, nav highlighting).
    // Page title, Unicode-escaped by the generator ("60分钟快速入门").
    var mkdocs_page_name = "60\u5206\u949f\u5feb\u901f\u5165\u95e8";
    // Source Markdown path; backslash separator indicates a Windows build host.
    var mkdocs_page_input_path = "get-started\\index.md";
    // Site-relative URL of this page.
    var mkdocs_page_url = "/get-started/";
  </script>
  
  <script src="../js/jquery-2.1.1.min.js" defer></script>
  <!-- Modernizr must run before first paint to replace the "no-js" class; do not defer it. -->
  <script src="../js/modernizr-2.8.3.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
  <script>hljs.initHighlightingOnLoad();</script>
  
</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
    <div class="wy-side-scroll">
      <div class="wy-side-nav-search">
	    <img src="/pytorch-zh/img/logo.svg" alt="PyTorch logo">
        <a href=".." class="icon icon-home"> Pytorch 中文文档（1.4.0）</a>
        <div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="search" name="q" placeholder="Search docs" title="Type search term here" aria-label="Search docs">
  </form>
</div>
      </div>

      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="..">主页</a>
                    </li>
                </ul>
                <ul class="current">
                    <li class="toctree-l1 current"><a class="reference internal current" href="./">60分钟快速入门</a>
    <ul class="current">
    <li class="toctree-l2"><a class="reference internal" href="#pytorch">什么是PyTorch?</a>
        <ul>
    <li class="toctree-l3"><a class="reference internal" href="#tensor">张量（Tensor）</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_1">运算操作</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#numpy">桥接 Numpy</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#cuda">CUDA 上的张量</a>
    </li>
        </ul>
    </li>
    <li class="toctree-l2"><a class="reference internal" href="#autograd">Autograd：自动微分</a>
        <ul>
    <li class="toctree-l3"><a class="reference internal" href="#_2">张量</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_3">梯度</a>
    </li>
        </ul>
    </li>
    <li class="toctree-l2"><a class="reference internal" href="#_4">神经网络</a>
        <ul>
    <li class="toctree-l3"><a class="reference internal" href="#_5">定义网络</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_6">损失函数</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_7">反向传播</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_8">更新网络权重</a>
    </li>
        </ul>
    </li>
    <li class="toctree-l2"><a class="reference internal" href="#_9">训练分类器</a>
        <ul>
    <li class="toctree-l3"><a class="reference internal" href="#_10">数据呢？</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_11">训练一个图像分类器</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#1-cifar-10">1. 加载和归一化 CIFAR 10</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#2">2. 定义卷积神经网络</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#3">3. 定义损失函数和优化器</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#4">4. 训练神经网络</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#5">5. 在测试集上测试模型</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#6-gpu">6. 在GPU上训练</a>
    </li>
        </ul>
    </li>
    <li class="toctree-l2"><a class="reference internal" href="#gpu">数据并行（多GPU训练）</a>
        <ul>
    <li class="toctree-l3"><a class="reference internal" href="#_12">导入和参数</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_13">虚拟数据集</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_14">简单的模型</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#dataparallel">创建模型和DataParallel</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_15">运行模型</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_16">结果</a>
    </li>
    <li class="toctree-l3"><a class="reference internal" href="#_17">总结</a>
    </li>
        </ul>
    </li>
    <li class="toctree-l2"><a class="reference internal" href="#_18">更多资料和学习资源</a>
    </li>
    </ul>
                    </li>
                </ul>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../faq/">PyTorch FAQ</a>
                    </li>
                </ul>
                <p class="caption"><span class="caption-text">两种基本结构</span></p>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="../torchnn/parameters/">参数 Parameters</a>
                    </li>
                    <li class="toctree-l1"><a class="reference internal" href="#">容器 Containers</a>
    <ul>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/module/">Module</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/sequential/">Sequential</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/modulelist/">Modulelist</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/moduledict/">Moduledict</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/parameterlist/">Parameterlist</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/parameterdict/">Parameterdict</a>
                </li>
    </ul>
                    </li>
                </ul>
                <p class="caption"><span class="caption-text">网络层</span></p>
                <ul>
                    <li class="toctree-l1"><a class="reference internal" href="#">卷积层</a>
    <ul>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/conv1d/">Conv1d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/conv2d/">Conv2d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/conv3d/">Conv3d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/convtranspose1d/">Convtranspose1d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/convtranspose2d/">Convtranspose2d</a>
                </li>
                <li class="toctree-l2"><a class="reference internal" href="../torchnn/convtranspose3d/">Convtranspose3d</a>
                </li>
    </ul>
                    </li>
                </ul>
      </div>
    </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="..">Pytorch 中文文档（1.4.0）</a>
      </nav>

      
      <div class="wy-nav-content">
        <div class="rst-content">
          <div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="..">Docs</a> &raquo;</li>
    
      
    
    <li>60分钟快速入门</li>
    <li class="wy-breadcrumbs-aside">
      
    </li>
  </ul>
  
  <hr/>
</div>
          <div role="main">
            <div class="section">
              
                <h1 id="pytorch-60">使用 PyTorch 进行深度学习：60 分钟的闪电战<a class="headerlink" href="#pytorch-60" title="Permanent link">&para;</a></h1>
<p>作者： <a href="http://soumith.ch/">Soumith Chintala</a></p>
<p>参考：<a href="http://shang.qq.com/wpa/qunwpa?idkey=349eb1bbaeeff1cf20408899cbe75669132ef145ff5ee6599f78a77dd144c367">PyTorch 中文翻译组 | ApacheCN 713436582</a></p>
<iframe src="https://player.bilibili.com/player.html?aid=62250140&amp;bvid=BV1Tt411M7k7&amp;cid=108203390&amp;page=1" title="Bilibili 视频：使用 PyTorch 进行深度学习 60 分钟闪电战" scrolling="no" width="100%" height="600px" style="border:0" allowfullscreen loading="lazy"> </iframe>

<p>​        </p>
<p>本教程目的：</p>
<ul>
<li>全面了解 PyTorch 的 Tensor 库和神经网络。</li>
<li>训练一个小型神经网络对图像进行分类。</li>
</ul>
<p>本教程假设您对<code>numpy</code>有基本的了解。</p>
<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <span style="color:#fff;font-size:1.25rem;">
                • 注意
            </span>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            请确保您已正确安装torch和torchvision软件包。
        </div>    
    </div>    
</div>

<h2 id="pytorch">什么是PyTorch?<a class="headerlink" href="#pytorch" title="Permanent link">&para;</a></h2>
<p>PyTorch是一个基于python的科学计算包，主要针对两类人群：</p>
<ul>
<li>作为NumPy的替代品，可以利用GPU的性能进行计算</li>
<li>作为一个高灵活性、速度快的深度学习平台</li>
</ul>
<h3 id="tensor">张量（Tensor）<a class="headerlink" href="#tensor" title="Permanent link">&para;</a></h3>
<p>张量与numpy的ndarray非常相似，不同的是张量可以使用GPU进行加速运算。</p>
<div class="codehilite"><pre><span></span><code><span class="c1"># 导入pytorch包</span>
<span class="kn">from</span> <span class="nn">__future__</span> <span class="kn">import</span> <span class="n">print_function</span>
<span class="kn">import</span> <span class="nn">torch</span>
</code></pre></div>

<ul>
<li>创建一个5*3的矩阵：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">9.2755e-39</span><span class="p">,</span> <span class="mf">1.0561e-38</span><span class="p">,</span> <span class="mf">1.0469e-38</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">9.4592e-39</span><span class="p">,</span> <span class="mf">1.0469e-38</span><span class="p">,</span> <span class="mf">1.0286e-38</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">8.4490e-39</span><span class="p">,</span> <span class="mf">9.6428e-39</span><span class="p">,</span> <span class="mf">1.1112e-38</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">9.5511e-39</span><span class="p">,</span> <span class="mf">1.0102e-38</span><span class="p">,</span> <span class="mf">1.0286e-38</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.0194e-38</span><span class="p">,</span> <span class="mf">9.6429e-39</span><span class="p">,</span> <span class="mf">9.2755e-39</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>创建一个随机初始化的矩阵：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">rand</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">0.7051</span><span class="p">,</span> <span class="mf">0.4652</span><span class="p">,</span> <span class="mf">0.4215</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.5618</span><span class="p">,</span> <span class="mf">0.3053</span><span class="p">,</span> <span class="mf">0.4522</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.7477</span><span class="p">,</span> <span class="mf">0.5481</span><span class="p">,</span> <span class="mf">0.2799</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.3495</span><span class="p">,</span> <span class="mf">0.5547</span><span class="p">,</span> <span class="mf">0.9938</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.8753</span><span class="p">,</span> <span class="mf">0.0308</span><span class="p">,</span> <span class="mf">0.7804</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>创建一个全0且数据类型为long的矩阵：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">long</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span>
        <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span>
        <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span>
        <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span>
        <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>直接使用数据创建一个张量：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">5.5</span><span class="p">,</span> <span class="mi">3</span><span class="p">])</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="mf">5.5000</span><span class="p">,</span> <span class="mf">3.0000</span><span class="p">])</span>
</code></pre></div>

<ul>
<li>根据已经存在张量创建新的张量。这些方法会复用输入张量的性质，比如dtype，除非用户提供新的性质</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">new_ones</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">)</span>    <span class="c1"># new_* 方法需要输入尺寸</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>

<span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn_like</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>  <span class="c1"># *_like 方法不需要输入尺寸</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">]],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">)</span>
<span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.9474</span><span class="p">,</span>  <span class="mf">0.1766</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.6757</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.5141</span><span class="p">,</span>  <span class="mf">0.5636</span><span class="p">,</span>  <span class="mf">0.1980</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.2313</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.0284</span><span class="p">,</span>  <span class="mf">1.7724</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.3084</span><span class="p">,</span>  <span class="mf">0.4280</span><span class="p">,</span>  <span class="mf">0.5484</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">0.1561</span><span class="p">,</span>  <span class="mf">0.5803</span><span class="p">,</span>  <span class="mf">0.2222</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>获取张量的尺寸：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">])</span>
</code></pre></div>

<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <span style="color:#fff;font-size:1.25rem;">
                • 注意
            </span>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            torch.Size实际上是一个tuple， 因此它支持所有tuple的操作。
        </div>    
    </div>    
</div>

<h3 id="_1">运算操作<a class="headerlink" href="#_1" title="Permanent link">&para;</a></h3>
<p>PyTorch有很多运算语法。下面的例子中，我们先看下加法运算。</p>
<ul>
<li>加法：语法1</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">y</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">rand</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span> <span class="o">+</span> <span class="n">y</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.1817</span><span class="p">,</span>  <span class="mf">1.1493</span><span class="p">,</span>  <span class="mf">0.1414</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.9347</span><span class="p">,</span>  <span class="mf">1.1595</span><span class="p">,</span>  <span class="mf">0.2592</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.8474</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.9216</span><span class="p">,</span>  <span class="mf">2.6888</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.8479</span><span class="p">,</span>  <span class="mf">0.7964</span><span class="p">,</span>  <span class="mf">0.5882</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.5055</span><span class="p">,</span>  <span class="mf">1.2544</span><span class="p">,</span>  <span class="mf">0.6216</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>加法：语法2</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">))</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.1817</span><span class="p">,</span>  <span class="mf">1.1493</span><span class="p">,</span>  <span class="mf">0.1414</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.9347</span><span class="p">,</span>  <span class="mf">1.1595</span><span class="p">,</span>  <span class="mf">0.2592</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.8474</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.9216</span><span class="p">,</span>  <span class="mf">2.6888</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.8479</span><span class="p">,</span>  <span class="mf">0.7964</span><span class="p">,</span>  <span class="mf">0.5882</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">0.5055</span><span class="p">,</span>  <span class="mf">1.2544</span><span class="p">,</span>  <span class="mf">0.6216</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>加法：提供一个输出张量作为参数</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">result</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">result</span><span class="p">)</span>
<span class="n">torch</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">result</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">result</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">,</span>  <span class="mf">6.8664e-43</span><span class="p">,</span> <span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">6.8664e-43</span><span class="p">,</span> <span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">,</span>  <span class="mf">6.8664e-43</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">,</span>  <span class="mf">6.8664e-43</span><span class="p">,</span> <span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">],</span>
        <span class="p">[</span> <span class="mf">6.8664e-43</span><span class="p">,</span> <span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">,</span>  <span class="mf">6.8664e-43</span><span class="p">],</span>
        <span class="p">[</span><span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">,</span>  <span class="mf">6.8664e-43</span><span class="p">,</span> <span class="o">-</span><span class="mf">3.5000e+00</span><span class="p">]])</span>
<span class="n">tensor</span><span class="p">([[</span><span class="mf">0.7657</span><span class="p">,</span> <span class="mf">0.9727</span><span class="p">,</span> <span class="mf">0.8171</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.4207</span><span class="p">,</span> <span class="mf">0.5959</span><span class="p">,</span> <span class="mf">0.0613</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.6160</span><span class="p">,</span> <span class="mf">0.1067</span><span class="p">,</span> <span class="mf">0.9163</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.5395</span><span class="p">,</span> <span class="mf">0.3684</span><span class="p">,</span> <span class="mf">0.0397</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.6616</span><span class="p">,</span> <span class="mf">0.6742</span><span class="p">,</span> <span class="mf">0.3994</span><span class="p">]])</span>
</code></pre></div>

<ul>
<li>加法：原位/原地操作(in-place）</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="c1"># x 加到 y 上</span>
<span class="nb">print</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>
<span class="n">y</span><span class="o">.</span><span class="n">add_</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">0.7657</span><span class="p">,</span> <span class="mf">0.9727</span><span class="p">,</span> <span class="mf">0.8171</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.4207</span><span class="p">,</span> <span class="mf">0.5959</span><span class="p">,</span> <span class="mf">0.0613</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.6160</span><span class="p">,</span> <span class="mf">0.1067</span><span class="p">,</span> <span class="mf">0.9163</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.5395</span><span class="p">,</span> <span class="mf">0.3684</span><span class="p">,</span> <span class="mf">0.0397</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.6616</span><span class="p">,</span> <span class="mf">0.6742</span><span class="p">,</span> <span class="mf">0.3994</span><span class="p">]])</span>
<span class="n">tensor</span><span class="p">([[</span><span class="mf">0.7657</span><span class="p">,</span> <span class="mf">0.9727</span><span class="p">,</span> <span class="mf">0.8171</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.4207</span><span class="p">,</span> <span class="mf">0.5959</span><span class="p">,</span> <span class="mf">0.0613</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.6160</span><span class="p">,</span> <span class="mf">0.1067</span><span class="p">,</span> <span class="mf">0.9163</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.5395</span><span class="p">,</span> <span class="mf">0.3684</span><span class="p">,</span> <span class="mf">0.0397</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">0.6616</span><span class="p">,</span> <span class="mf">0.6742</span><span class="p">,</span> <span class="mf">0.3994</span><span class="p">]])</span>
</code></pre></div>

<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <span style="color:#fff;font-size:1.25rem;">
                • 注意
            </span>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            任何原位转换操作都是以 _为后缀的，例如：<code>x.copy_()</code>, <code>x.t_()</code>会改变<code>x</code>
        </div>    
    </div>    
</div>

<ul>
<li>你也可以使用所有标准numpy式的索引操作。</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">[:,</span> <span class="mi">1</span><span class="p">])</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span> <span class="mf">0.1766</span><span class="p">,</span>  <span class="mf">0.5636</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.0284</span><span class="p">,</span>  <span class="mf">0.4280</span><span class="p">,</span>  <span class="mf">0.5803</span><span class="p">])</span>
</code></pre></div>

<ul>
<li>改变形状：你可以使用<code>torch.view</code>来改变张量的形状：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">4</span><span class="p">)</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="mi">16</span><span class="p">)</span>
<span class="n">z</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">8</span><span class="p">)</span>  <span class="c1"># -1 表示根据其他维度的大小自动推断该位置的大小</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">size</span><span class="p">(),</span> <span class="n">y</span><span class="o">.</span><span class="n">size</span><span class="p">(),</span> <span class="n">z</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">4</span><span class="p">])</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">16</span><span class="p">])</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">8</span><span class="p">])</span>
</code></pre></div>

<ul>
<li>如果你有一个单元素的张量，可以使用<code>item()</code>来获得它的值作为python数字</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">item</span><span class="p">())</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">0.9112</span><span class="p">])</span>
<span class="o">-</span><span class="mf">0.9112</span>
</code></pre></div>

<p>​        </p>
<blockquote>
<p><strong>后续阅读</strong></p>
<p>​     在<a href="https://pytorch.org/docs/torch">这里</a>查看更多关于转置、索引、切片、数学运算、线性代数、随机数等100多个张量操作的内容。</p>
</blockquote>
<p>​                  </p>
<h3 id="numpy">桥接 Numpy<a class="headerlink" href="#numpy" title="Permanent link">&para;</a></h3>
<p>将一个Torch张量转换为一个NumPy数组是轻而易举的事情，反之亦然。</p>
<p>Torch张量和NumPy数组将共享它们的底层内存位置（如果 Torch 张量在CPU上），因此当一个改变时，另一个也会随之改变。</p>
<ul>
<li>将 torch 张量转化成 numpy 数组</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="mi">5</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">a</span><span class="p">)</span>
<span class="n">b</span> <span class="o">=</span> <span class="n">a</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">b</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">])</span>
<span class="n">array</span><span class="p">([</span><span class="mf">1.</span> <span class="mf">1.</span> <span class="mf">1.</span> <span class="mf">1.</span> <span class="mf">1.</span><span class="p">])</span>
</code></pre></div>

<p>看看当 torch 张量原位改变时，NumPy 数组的值是如何随之改变的：</p>
<div class="codehilite"><pre><span></span><code><span class="n">a</span><span class="o">.</span><span class="n">add_</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">a</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">b</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">])</span>
<span class="n">array</span><span class="p">([</span><span class="mf">2.</span> <span class="mf">2.</span> <span class="mf">2.</span> <span class="mf">2.</span> <span class="mf">2.</span><span class="p">])</span>
</code></pre></div>

<ul>
<li>将 numpy 数组转化成 torch 张量</li>
</ul>
<p>看看改变 numpy 数组是如何自动改变 torch 张量的：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="n">a</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="mi">5</span><span class="p">)</span>
<span class="n">b</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">(</span><span class="n">a</span><span class="p">)</span>
<span class="n">np</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">a</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">a</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">b</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">array</span><span class="p">([</span><span class="mf">2.</span> <span class="mf">2.</span> <span class="mf">2.</span> <span class="mf">2.</span> <span class="mf">2.</span><span class="p">])</span>
<span class="n">tensor</span><span class="p">([</span><span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">)</span>
</code></pre></div>

<p>CPU上的所有张量(CharTensor除外)都支持与Numpy的相互转换。</p>
<h3 id="cuda">CUDA 上的张量<a class="headerlink" href="#cuda" title="Permanent link">&para;</a></h3>
<p>张量可以使用<code>.to</code>方法移动到任何设备（device）上：</p>
<div class="codehilite"><pre><span></span><code><span class="c1"># 当GPU可用时，我们可以使用以下代码</span>
<span class="c1"># 使用`torch.device`将张量移入或移出GPU</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
    <span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cuda&#39;</span><span class="p">)</span>                <span class="c1"># 一个 CUDA 设备对象</span>
    <span class="n">y</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>        <span class="c1"># 直接在 GPU 上创建张量</span>
    <span class="n">x</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>                             <span class="c1"># 或者使用`.to(&#39;cuda&#39;)`方法将张量移入GPU</span>
    <span class="n">z</span> <span class="o">=</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">z</span><span class="p">)</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">z</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="s1">&#39;cpu&#39;</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">))</span>             <span class="c1"># `.to`方法也能在移动时改变dtype</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="mf">1.0445</span><span class="p">],</span> <span class="n">device</span><span class="o">=</span><span class="s1">&#39;cuda:0&#39;</span><span class="p">)</span>
<span class="n">tensor</span><span class="p">([</span><span class="mf">1.0445</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">)</span>
</code></pre></div>

<h2 id="autograd">Autograd：自动微分<a class="headerlink" href="#autograd" title="Permanent link">&para;</a></h2>
<p>PyTorch中神经网络的核心是<code>autograd</code>包。先简单介绍一下这个包，然后训练我们的第一个的神经网络。</p>
<p><code>autograd</code>为所有的张量操作提供自动微分机制。它是一个运行时定义（define-by-run）的框架，这就意味着反向传播是根据你的代码运行方式来决定的，并且每次迭代可以是不同的。</p>
<p>让我们用一些简单的例子来看下吧。</p>
<h3 id="_2">张量<a class="headerlink" href="#_2" title="Permanent link">&para;</a></h3>
<p><code>torch.Tensor</code> 是这个包的核心。如果设置它的属性 <code>.requires_grad</code> 为 <code>True</code>，它就会开始追踪在张量上面的所有操作。完成计算以后可以通过调用 <code>backward()</code> 来自动计算所有的梯度。这个张量的所有梯度将会自动累加到 <code>grad</code>属性上。</p>
<p>你可以通过调用 <code>.detach()</code> 将其与计算历史脱钩，从而阻止张量被历史追踪，并且防止该张量在未来的计算中被追踪。</p>
<p>为了阻止跟踪张量历史记录（和使用内存），可以将代码块包装在 <code>with torch.no_grad():</code> 中。这在评估模型时特别有用，因为模型可能具有 <code>requires_grad = True</code> 的可训练参数，但是我们不需要在此过程中对他们进行梯度计算。</p>
<p>还有一个对于<code>autograd</code> 的实现非常重要的类：<code>Function</code>。</p>
<p><code>Tensor</code> 和 <code>Function</code> 互相连接生成了一个无圈图（acyclic graph），它编码了完整的计算历史。每个张量都有一个 <code>.grad_fn</code> 属性，该属性引用了创建 <code>Tensor</code> 自身的<code>Function</code>（除非这个张量是用户手动创建的，即这个张量的 <code>grad_fn</code> 是 <code>None</code> ）。</p>
<p>如果你想计算梯度，可以在<code>Tensor</code> 上调用 <code>.backward()</code> 方法。如果 <code>Tensor</code> 是一个标量，你不需要给<code>backward()</code> 提供任何额外的参数，但是如果它有更多的元素的话，则需要指定一个 <code>gradient</code> 参数，该参数是形状与张量相同的。</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
</code></pre></div>

<p>创建一个张量并设置 <code>requires_grad=True</code> 来追踪其计算历史</p>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span><span class="mi">2</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">1.</span><span class="p">]],</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</code></pre></div>

<p>对这个张量做一次计算：</p>
<div class="codehilite"><pre><span></span><code><span class="n">y</span> <span class="o">=</span> <span class="n">x</span> <span class="o">+</span> <span class="mi">2</span>
<span class="nb">print</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">3.</span><span class="p">,</span> <span class="mf">3.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">3.</span><span class="p">,</span> <span class="mf">3.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddBackward0</span><span class="o">&gt;</span><span class="p">)</span>
</code></pre></div>

<p><code>y</code> 作为计算的结果被创建，所以它也有 <code>grad_fn</code> 属性。</p>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">y</span><span class="o">.</span><span class="n">grad_fn</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="o">&lt;</span><span class="n">AddBackward0</span> <span class="nb">object</span> <span class="n">at</span> <span class="mh">0x000001EAC1842DA0</span><span class="o">&gt;</span>
</code></pre></div>

<p>在 <code>y</code> 上做更多的操作：</p>
<div class="codehilite"><pre><span></span><code><span class="n">z</span> <span class="o">=</span> <span class="n">y</span> <span class="o">*</span> <span class="n">y</span> <span class="o">*</span> <span class="mi">3</span>
<span class="n">out</span> <span class="o">=</span> <span class="n">z</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">z</span><span class="p">)</span> 
<span class="nb">print</span><span class="p">(</span><span class="n">out</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">27.</span><span class="p">,</span> <span class="mf">27.</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">27.</span><span class="p">,</span> <span class="mf">27.</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">MulBackward0</span><span class="o">&gt;</span><span class="p">)</span> 
<span class="n">tensor</span><span class="p">(</span><span class="mf">27.</span><span class="p">,</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">MeanBackward0</span><span class="o">&gt;</span><span class="p">)</span>
</code></pre></div>

<p><code>.requires_grad_(...)</code> 原地改变了已经存在的张量的 <code>requires_grad</code> 标记。如果没有指定的话，输入标记的默认值是 <code>False</code>。</p>
<div class="codehilite"><pre><span></span><code><span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="n">a</span> <span class="o">=</span> <span class="p">((</span><span class="n">a</span> <span class="o">*</span> <span class="mi">3</span><span class="p">)</span> <span class="o">/</span> <span class="p">(</span><span class="n">a</span> <span class="o">-</span> <span class="mi">1</span><span class="p">))</span>
<span class="nb">print</span><span class="p">(</span><span class="n">a</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>
<span class="n">a</span><span class="o">.</span><span class="n">requires_grad_</span><span class="p">(</span><span class="kc">True</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">a</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>
<span class="n">b</span> <span class="o">=</span> <span class="p">(</span><span class="n">a</span> <span class="o">*</span> <span class="n">a</span><span class="p">)</span><span class="o">.</span><span class="n">sum</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">b</span><span class="o">.</span><span class="n">grad_fn</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="kc">False</span>
<span class="kc">True</span>
<span class="o">&lt;</span><span class="n">SumBackward0</span> <span class="nb">object</span> <span class="n">at</span> <span class="mh">0x000001EAC185CC88</span><span class="o">&gt;</span>
</code></pre></div>

<h3 id="_3">梯度<a class="headerlink" href="#_3" title="Permanent link">&para;</a></h3>
<p>现在让我们开始反向传播。由于 <code>out</code> 是一个标量，<code>out.backward()</code> 等效于 <code>out.backward(torch.tensor(1.))</code>。</p>
<div class="codehilite"><pre><span></span><code><span class="n">out</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
</code></pre></div>

<p>打印梯度 <script type="math/tex">d(out)/dx</script>：</p>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">grad</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="mf">4.5000</span><span class="p">,</span> <span class="mf">4.5000</span><span class="p">],</span>
        <span class="p">[</span><span class="mf">4.5000</span><span class="p">,</span> <span class="mf">4.5000</span><span class="p">]])</span>
</code></pre></div>

<p>你应该会得到一个全为 <code>4.5</code> 的矩阵。让我们将 <code>out</code> 张量记作 <script type="math/tex">o</script>，则有 <script type="math/tex">o=\frac{1}{4}\sum_i z_i</script>，其中 <script type="math/tex">z_i=3(x_i+2)^2</script> 以及 <script type="math/tex">z_i|_{x_i=1}=27</script>。因此，<script type="math/tex">\frac{\partial o}{\partial x_i}=\frac{3}{2}(x_i+2)</script>，因而<script type="math/tex">\frac{\partial o}{\partial x_i}|_{x_i=1}=\frac{9}{2}=4.5</script>。</p>
<p>数学上，若有一个矢量函数<script type="math/tex">\vec y = f(\vec x)</script>，那么 <script type="math/tex">\vec y</script> 相对于 <script type="math/tex">\vec x</script> 的梯度是一个雅克比矩阵：
<script type="math/tex; mode=display">
J=\left(
\begin{matrix}
\frac{\partial y_1}{\partial x_1}      & \cdots   &  \frac{\partial y_1}{\partial x_n}      \\
 \vdots & \ddots & \vdots \\
 \frac{\partial y_m}{\partial x_1}      & \cdots      & \frac{\partial y_m}{\partial x_n}      \\
\end{matrix}
\right)
</script>
通常来说，<code>torch.autograd</code> 是一个可计算雅克比矢量积的“引擎”。也就是说，给定任意向量 <script type="math/tex">v=(v_1, v_2, ..., v_m)^{T}</script>，计算乘积<script type="math/tex">v^T \cdot J</script>。如果 <script type="math/tex">v</script> 恰好是一个标量函数 <script type="math/tex">l = g(\vec x)</script> 的导数，即
<script type="math/tex; mode=display">
v = \left(\frac{\partial l}{\partial y_1} \cdots \frac{\partial l}{\partial y_m}\right)^T
</script>
那么根据链式法则，雅克比矢量积应该是<script type="math/tex">l</script> 对 <script type="math/tex">\vec x</script> 的导数：
<script type="math/tex; mode=display">
J^T \cdot v=\left(
\begin{matrix}
\frac{\partial y_1}{\partial x_1}      & \cdots   &  \frac{\partial y_m}{\partial x_1}      \\
 \vdots & \ddots & \vdots \\
 \frac{\partial y_1}{\partial x_n}      & \cdots      & \frac{\partial y_m}{\partial x_n}      \\
\end{matrix}
\right)
\left(
\begin{matrix}
\frac{\partial l}{\partial y_1} \\
\vdots \\
\frac{\partial l}{\partial y_m}
\end{matrix}
\right)
=
\left(
\begin{matrix}
\frac{\partial l}{\partial x_1} \\
\vdots \\
\frac{\partial l}{\partial x_n}
\end{matrix}
\right)
</script>
</p>
<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <font color='white' size=4>
                • 注意
            </font>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            行向量的\(v^T \cdot  J\)也可以被视作列向量的\(J^T \cdot v\)
        </div>    
    </div>    
</div>

<p>雅可比矢量积的这一特性使得将外部梯度输入到具有非标量输出的模型中变得非常方便。</p>
<p>现在我们来看一个雅可比向量积的例子:</p>
<div class="codehilite"><pre><span></span><code><span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">x</span> <span class="o">*</span> <span class="mi">2</span>
<span class="k">while</span> <span class="n">y</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">norm</span><span class="p">()</span> <span class="o">&lt;</span> <span class="mi">100</span><span class="p">:</span>
    <span class="n">y</span> <span class="o">=</span> <span class="n">y</span> <span class="o">*</span> <span class="mi">2</span>
<span class="nb">print</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="o">-</span><span class="mf">1402.4813</span><span class="p">,</span>    <span class="mf">24.2467</span><span class="p">,</span> <span class="o">-</span><span class="mf">1102.0972</span><span class="p">],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">MulBackward0</span><span class="o">&gt;</span><span class="p">)</span>
</code></pre></div>

<p>在这种情况下，<code>y</code> 不再是标量。<code>torch.autograd</code> 不能直接计算完整的雅可比矩阵，但是如果我们只想要雅可比矢量积，只需将这个向量作为参数传给 <code>backward</code>：</p>
<div class="codehilite"><pre><span></span><code><span class="n">v</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">0.1</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mf">0.0001</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="n">y</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="n">v</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">grad</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([</span><span class="mf">2.0480e+02</span><span class="p">,</span> <span class="mf">2.0480e+03</span><span class="p">,</span> <span class="mf">2.0480e-01</span><span class="p">])</span>
</code></pre></div>

<p>也可以通过将代码块包装在 <code>with torch.no_grad():</code> 中，来阻止<code>autograd</code>跟踪设置了 <code>.requires_grad=True</code> 的张量的历史记录。</p>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>
<span class="nb">print</span><span class="p">((</span><span class="n">x</span> <span class="o">**</span> <span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>

<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
    <span class="nb">print</span><span class="p">((</span><span class="n">x</span> <span class="o">**</span> <span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">requires_grad</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="kc">True</span>
<span class="kc">True</span>
<span class="kc">False</span>
</code></pre></div>

<p>​        </p>
<blockquote>
<p>后续阅读：</p>
<p><code>autograd</code> 和 <code>Function</code> 的文档见：<a href="https://pytorch.org/docs/autograd">https://pytorch.org/docs/autograd</a></p>
</blockquote>
<p>​        </p>
<h2 id="_4">神经网络<a class="headerlink" href="#_4" title="Permanent link">&para;</a></h2>
<p>神经网络可以使用 <code>torch.nn</code> 来构建。</p>
<p>现在你已经大致了解了 <code>autograd</code> 了，<code>nn</code> 依赖于 <code>autograd</code> 来定义模型和对其进行微分的。一个 <code>nn.Module</code> 模块包含各个层，以及一个 <code>forward(input)</code> 方法，该方法返回 <code>output</code>。</p>
<p>例如下面这个数字图片的网络分类器——<strong>convnet</strong>：</p>
<p><img alt="" src="https://pytorch.org/tutorials/_images/mnist.png" /></p>
<p>它是一个简单的前馈神经网络。它接收一个输入，将输入一层一层地传递给后面的层，最后给出输出。</p>
<p>典型的神经网络训练过程如下：</p>
<ul>
<li>定义一个具有一些可训练参数（或权重）的神经网络；</li>
<li>迭代所有输入数据集；</li>
<li>通过神经网络处理输入数据；</li>
<li>计算损失（神经网络的输出距离正确答案有多远）；</li>
<li>将梯度后向传递给网络参数；</li>
<li>更新网络权重，一个典型的更新规则：<code>weight = weight - learning_rate * gradient</code>。</li>
</ul>
<h3 id="_5">定义网络<a class="headerlink" href="#_5" title="Permanent link">&para;</a></h3>
<p>我们先来定义一个神经网络：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="kn">import</span> <span class="nn">torch.nn.functional</span> <span class="k">as</span> <span class="nn">F</span>

<span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">6</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>  <span class="c1"># 1个输入图像通道，6个输出通道，3x3卷积核</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">3</span><span class="p">)</span>
        <span class="c1"># 线性变换：y = Wx + b</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">16</span> <span class="o">*</span> <span class="mi">6</span> <span class="o">*</span> <span class="mi">6</span><span class="p">,</span> <span class="mi">120</span><span class="p">)</span>  <span class="c1"># 6x6表示图像维度</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">120</span><span class="p">,</span> <span class="mi">84</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">84</span><span class="p">,</span> <span class="mi">10</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">max_pool2d</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">)),</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>  <span class="c1"># 2x2大小的最大池化层</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">max_pool2d</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">)),</span> <span class="mi">2</span><span class="p">)</span>  <span class="c1"># 如果池化层是一个方阵，可以指定单个大小</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_flat_features</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="k">def</span> <span class="nf">num_flat_features</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">size</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">size</span><span class="p">()[</span><span class="mi">1</span><span class="p">:]</span>  <span class="c1"># 获取除批数据大小以外的所有数据维度</span>
        <span class="n">num_features</span> <span class="o">=</span> <span class="mi">1</span>
        <span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">size</span><span class="p">:</span>
            <span class="n">num_features</span> <span class="o">*=</span> <span class="n">s</span>
        <span class="k">return</span> <span class="n">num_features</span>

<span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="nb">print</span><span class="p">(</span><span class="n">net</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Net</span><span class="p">(</span>
  <span class="p">(</span><span class="n">conv1</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">6</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
  <span class="p">(</span><span class="n">conv2</span><span class="p">):</span> <span class="n">Conv2d</span><span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="n">kernel_size</span><span class="o">=</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
  <span class="p">(</span><span class="n">fc1</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">576</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">120</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="n">fc2</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">120</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">84</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
  <span class="p">(</span><span class="n">fc3</span><span class="p">):</span> <span class="n">Linear</span><span class="p">(</span><span class="n">in_features</span><span class="o">=</span><span class="mi">84</span><span class="p">,</span> <span class="n">out_features</span><span class="o">=</span><span class="mi">10</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
</code></pre></div>

<p>你必须定义 <code>forward</code> 函数，并且当你使用 <code>autograd</code> 时 <code>backward</code> 函数（用来计算梯度）会自动定义。你可以在 <code>forward</code> 函数中使用任意张量操作。</p>
<p>用<code>net.parameters()</code> 可以返回模型的可训练参数。</p>
<div class="codehilite"><pre><span></span><code><span class="n">params</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">parameters</span><span class="p">())</span>
<span class="nb">print</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">params</span><span class="p">))</span>
<span class="nb">print</span><span class="p">(</span><span class="n">params</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>  <span class="c1"># conv1的权重</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="mi">10</span>
<span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">6</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">])</span>
</code></pre></div>

<p>让我们尝试一个随机的 <code>32x32</code> 的输入，注意：网络期望的输入尺寸是 <code>32x32</code>。为了让网络使用MNIST数据集，请将图片从数据集中改变尺寸到 <code>32x32</code>。</p>
<div class="codehilite"><pre><span></span><code><span class="n">inp</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">)</span> 
<span class="n">out</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">inp</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">out</span><span class="p">)</span>
</code></pre></div>

<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <font color='white' size=4>
                • 注意
            </font>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            源文档中输入变量定义为input，这里改为inp，因为input是Python的关键字，为了保持编码规范，在定义变量的时候尽量避免使用Python关键字。
        </div>    
    </div>    
</div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">([[</span><span class="o">-</span><span class="mf">0.1071</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0474</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0214</span><span class="p">,</span>  <span class="mf">0.0011</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0126</span><span class="p">,</span>  <span class="mf">0.0699</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0472</span><span class="p">,</span>  <span class="mf">0.0035</span><span class="p">,</span>
         <span class="o">-</span><span class="mf">0.1425</span><span class="p">,</span>  <span class="mf">0.0795</span><span class="p">]],</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">AddmmBackward</span><span class="o">&gt;</span><span class="p">)</span>
</code></pre></div>

<p>将所有参数的梯度缓存置零然后使用随机梯度反向传播：</p>
<div class="codehilite"><pre><span></span><code><span class="n">net</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="n">out</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
</code></pre></div>

<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <font color='white' size=4>
                • 注意
            </font>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            <code>torch.nn</code> 只支持小批量处理。整个 <code>torch.nn</code> 包只支持小批量样本的输入，不支持单个样本。比如 <code>nn.Conv2d</code> 接收一个4维张量，即\(n个样本 \times n个通道 \times H高度 \times W宽度\)。如果是一个单独的样本，只需要使用<code>inp.unsqueeze(0)</code>来添加一个“假的”批大小维度。
        </div>    
    </div>    
</div>

<p>在进行下一步之前，我们先来回顾一下目前为止你所见到的类。</p>
<p><strong>复习：</strong></p>
<ul>
<li><code>torch.Tensor</code> ——一个多维数组，支持诸如 <code>backward()</code> 等自动求导操作，同时也保存了张量的梯度。</li>
<li><code>nn.Module</code> —— 神经网络模块，是一种方便封装参数的方式，具有将参数移动到GPU、导出、加载等功能。</li>
<li><code>nn.Parameter</code> —— 张量的一种，当它作为一个属性分配给一个<code>Module</code>的时候，它会被自动注册为一个参数。</li>
<li><code>autograd.Function</code> —— 实现自动求导操作的前向和后向传播，每个张量至少创建一个 <code>Function</code> 节点，该节点链接到创建张量的函数并对其历史进行编码。</li>
</ul>
<p><strong>目前为止，我们学习到了：</strong></p>
<ul>
<li>定义神经网络</li>
<li>处理输入和调用后向传播</li>
</ul>
<p><strong>还剩：</strong></p>
<ul>
<li>计算损失</li>
<li>更新网络参数</li>
</ul>
<h3 id="_6">损失函数<a class="headerlink" href="#_6" title="Permanent link">&para;</a></h3>
<p>损失函数接收 <code>(output, target)</code>对作为输入，然后计算一个值，这个值用来估计网络输出与真实目标之间的差距。</p>
<p>在 <code>nn</code> 包里面有多种不同的<a href="https://pytorch.org/docs/nn.html#loss-functions">损失函数</a>。一个简单的损失是：<code>nn.MSELoss</code>，它计算模型输出与目标之间的均方误差。</p>
<p>例如：</p>
<div class="codehilite"><pre><span></span><code><span class="n">out</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">inp</span><span class="p">)</span>
<span class="n">target</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">10</span><span class="p">)</span>  <span class="c1"># 伪造的目标</span>
<span class="n">target</span> <span class="o">=</span> <span class="n">target</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span>  <span class="c1"># 使目标与输出具有相同的形状</span>
<span class="n">criterion</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">MSELoss</span><span class="p">()</span>

<span class="n">loss</span> <span class="o">=</span> <span class="n">criterion</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="n">target</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">loss</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">tensor</span><span class="p">(</span><span class="mf">1.3604</span><span class="p">,</span> <span class="n">grad_fn</span><span class="o">=&lt;</span><span class="n">MseLossBackward</span><span class="o">&gt;</span><span class="p">)</span>
</code></pre></div>

<p>现在如果使用 <code>loss</code> 的 <code>.grad_fn</code> 属性跟踪反向传播过程，会看到如下计算图：</p>
<div class="codehilite"><pre><span></span><code><span class="n">inp</span> <span class="o">-&gt;</span> <span class="n">conv2d</span> <span class="o">-&gt;</span> <span class="n">relu</span> <span class="o">-&gt;</span> <span class="n">maxpool2d</span> <span class="o">-&gt;</span> <span class="n">conv2d</span> <span class="o">-&gt;</span> <span class="n">relu</span> <span class="o">-&gt;</span> <span class="n">maxpool2d</span>
    <span class="o">-&gt;</span> <span class="n">view</span> <span class="o">-&gt;</span> <span class="n">linear</span> <span class="o">-&gt;</span> <span class="n">relu</span> <span class="o">-&gt;</span> <span class="n">linear</span> <span class="o">-&gt;</span> <span class="n">relu</span> <span class="o">-&gt;</span> <span class="n">linear</span>
    <span class="o">-&gt;</span> <span class="n">MSELoss</span>
    <span class="o">-&gt;</span> <span class="n">loss</span>
</code></pre></div>

<p>所以，当我们调用<code>loss.backward()</code>，整张图开始关于loss微分，计算图中所有设置了<code>requires_grad=True</code>的张量的<code>.grad</code>属性累积着梯度张量。</p>
<p>为了说明这一点，让我们向后跟踪几步：</p>
<div class="codehilite"><pre><span></span><code><span class="nb">print</span><span class="p">(</span><span class="n">loss</span><span class="o">.</span><span class="n">grad_fn</span><span class="p">)</span>  <span class="c1"># MSELoss</span>
<span class="nb">print</span><span class="p">(</span><span class="n">loss</span><span class="o">.</span><span class="n">grad_fn</span><span class="o">.</span><span class="n">next_functions</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="mi">0</span><span class="p">])</span>  <span class="c1"># Linear</span>
<span class="nb">print</span><span class="p">(</span><span class="n">loss</span><span class="o">.</span><span class="n">grad_fn</span><span class="o">.</span><span class="n">next_functions</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">next_functions</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="mi">0</span><span class="p">])</span>  <span class="c1"># ReLU</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="o">&lt;</span><span class="n">MseLossBackward</span> <span class="nb">object</span> <span class="n">at</span> <span class="mh">0x000001C7C6F09080</span><span class="o">&gt;</span>
<span class="o">&lt;</span><span class="n">AddmmBackward</span> <span class="nb">object</span> <span class="n">at</span> <span class="mh">0x000001C7C6EFF518</span><span class="o">&gt;</span>
<span class="o">&lt;</span><span class="n">AccumulateGrad</span> <span class="nb">object</span> <span class="n">at</span> <span class="mh">0x000001C7C6EFFCC0</span><span class="o">&gt;</span>
</code></pre></div>

<h3 id="_7">反向传播<a class="headerlink" href="#_7" title="Permanent link">&para;</a></h3>
<p>我们只需要调用<code>loss.backward()</code>就可以进行反向传播。我们需要先清零现有的梯度，否则梯度将会与已有的梯度累加。</p>
<p>现在，我们调用<code>loss.backward()</code>，并查看conv1层的偏置（bias）在反向传播前后的梯度。</p>
<div class="codehilite"><pre><span></span><code><span class="n">net</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>  <span class="c1"># 所有参数梯度缓存清零</span>

<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;conv1.bias.grad before backward: &quot;</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">conv1</span><span class="o">.</span><span class="n">bias</span><span class="o">.</span><span class="n">grad</span><span class="p">)</span>

<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>

<span class="nb">print</span><span class="p">(</span><span class="s2">&quot;conv1.bias.grad after backward: &quot;</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">conv1</span><span class="o">.</span><span class="n">bias</span><span class="o">.</span><span class="n">grad</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">conv1</span><span class="o">.</span><span class="n">bias</span><span class="o">.</span><span class="n">grad</span> <span class="n">before</span> <span class="n">backward</span><span class="p">:</span> 
<span class="kc">None</span>
<span class="n">conv1</span><span class="o">.</span><span class="n">bias</span><span class="o">.</span><span class="n">grad</span> <span class="n">after</span> <span class="n">backward</span><span class="p">:</span> 
<span class="n">tensor</span><span class="p">([</span> <span class="mf">0.0380</span><span class="p">,</span> <span class="o">-</span><span class="mf">0.0104</span><span class="p">,</span>  <span class="mf">0.0112</span><span class="p">,</span>  <span class="mf">0.0200</span><span class="p">,</span>  <span class="mf">0.0328</span><span class="p">,</span>  <span class="mf">0.0025</span><span class="p">])</span>
</code></pre></div>

<p>现在我们看下怎样使用损失函数。</p>
<p>​        </p>
<blockquote>
<p>后续阅读：</p>
<p>神经网络包包含了各种模块和损失函数，这些模块和损失函数构成了深度神经网络的构建模块。完整的文档列表见<a href="https://pytorch.org/docs/stable/nn.html">这里</a>。</p>
<p>现在唯一要学习的是：</p>
<p>更新网络权重</p>
</blockquote>
<p>​         </p>
<h3 id="_8">更新网络权重<a class="headerlink" href="#_8" title="Permanent link">&para;</a></h3>
<p>实际上最简单的权重更新规则是随机梯度下降（SGD）：
<script type="math/tex; mode=display">
weight = weight - learning\_rate * gradient
</script>
我们可以简单的Python代码实现：</p>
<div class="codehilite"><pre><span></span><code><span class="n">learning_rate</span> <span class="o">=</span> <span class="mf">0.01</span>
<span class="k">for</span> <span class="n">f</span> <span class="ow">in</span> <span class="n">net</span><span class="o">.</span><span class="n">parameters</span><span class="p">():</span>
    <span class="n">f</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">sub_</span><span class="p">(</span><span class="n">f</span><span class="o">.</span><span class="n">grad</span><span class="o">.</span><span class="n">data</span> <span class="o">*</span> <span class="n">learning_rate</span><span class="p">)</span>
</code></pre></div>

<p>然而当你使用神经网络的时候，你希望使用不同的更新权重的规则，比如SGD、Nesterov-SGD、Adam、RMSProp等。为了达到这个目的，我们创建了一个小包：<code>torch.optim</code>，它实现了所有的权重更新方法。用起来也非常简单：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch.optim</span> <span class="k">as</span> <span class="nn">optim</span>

<span class="c1"># 创建优化器</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">optim</span><span class="o">.</span><span class="n">SGD</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="mf">0.01</span><span class="p">)</span>

<span class="c1"># 训练步骤：</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>
<span class="n">out</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">inp</span><span class="p">)</span>
<span class="n">loss</span> <span class="o">=</span> <span class="n">criterion</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="n">target</span><span class="p">)</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>  <span class="c1"># 更新权重</span>
</code></pre></div>

<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <font color='white' size=4>
                • 注意
            </font>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            观察梯度缓存区是如何使用<code>optimizer.zero_grad()</code>手动清零的。这是因为梯度是累加的，正如前面<a href="https://pytorch.apachecn.org/docs/1.4/blitz/neural_networks_tutorial.html#">反向传播章节</a>叙述的那样。
        </div>    
    </div>    
</div>

<h2 id="_9">训练分类器<a class="headerlink" href="#_9" title="Permanent link">&para;</a></h2>
<p>截止目前，你已经看到了如何定义神经网络，计算损失，更新网络权重。接下来你可能会想</p>
<h3 id="_10">数据呢？<a class="headerlink" href="#_10" title="Permanent link">&para;</a></h3>
<p>通常你需要处理图像、文本、音频、视频数据，你可以使用标准的Python包把数据加载到numpy的数组中。然后把它们转化成 <code>torch.*Tensor</code>。</p>
<ul>
<li>对于图像，比如 <code>Pillow</code>、<code>OpenCV</code> 是非常有用的包；</li>
<li>对于音频， 比如 <code>scipy</code> 和 <code>librosa</code> 是非常有用的包；</li>
<li>对于 文本， 原生的  <code>Python</code> 或者基于 <code>Cython</code> 的加载包，或者 <code>NLTK</code> 和 <code>SpaCy</code> 也是非常有用的包。</li>
</ul>
<p>特别是对于视觉，我们已经创建了一个名叫 <code>torchvision</code> 的包，它包含了常用的数据集的加载器，比如 Imagenet, CIFAR10, MNIST等数据集，以及图像数据转换器：<code>torchvision.datasets</code>和<code>torch.utils.data.DataLoader</code>。</p>
<p>这个包为避免写样板代码提供了巨大的便利。</p>
<p>这个教程里，我们使用 CIFAR10 数据集。它有以下几类：飞机、手机、鸟、猫、鹿、狗、青蛙、马、船、卡车。CIFAR 10 中的图片是 3x32x32尺寸的，即 32x32 像素的 3 通道彩色图片。</p>
<p><img alt="" src="https://pytorch.org/tutorials/_images/cifar10.png" /></p>
<h3 id="_11">训练一个图像分类器<a class="headerlink" href="#_11" title="Permanent link">&para;</a></h3>
<p>我们按照下面的步骤：</p>
<ol>
<li>用 <code>torchvision</code> 加载和归一化 CIFAR 10 训练集和测试集数据；</li>
<li>定义卷积神经网络；</li>
<li>定义损失函数；</li>
<li>在训练集上训练神经网络；</li>
<li>在测试集上测试神经网络。</li>
</ol>
<h3 id="1-cifar-10">1. 加载和归一化 CIFAR 10<a class="headerlink" href="#1-cifar-10" title="Permanent link">&para;</a></h3>
<p>用 <code>torchvision</code> 加载CIFAR 10 是非常简单的：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torchvision</span>
<span class="kn">import</span> <span class="nn">torchvision.transforms</span> <span class="k">as</span> <span class="nn">transforms</span>
</code></pre></div>

<p><code>torchvision</code> 的输出是数据范围在 [0, 1] 之间的 PILImage 图像。然后我们将它们转化成归一化到 [-1, 1] 的张量。</p>
<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:orange;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <font color='white' size=4>
                • 警告
            </font>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            如果你在 Windows 上运行出现 <b>BrokenPipeError</b>，可以尝试设置<code>torch.utils.data.DataLoader()</code>的<code>num_workers=0</code>。
        </div>    
    </div>    
</div>

<div class="codehilite"><pre><span></span><code><span class="n">transform</span> <span class="o">=</span> <span class="n">transforms</span><span class="o">.</span><span class="n">Compose</span><span class="p">(</span>
    <span class="p">[</span>
        <span class="n">transforms</span><span class="o">.</span><span class="n">ToTensor</span><span class="p">(),</span>
        <span class="n">transforms</span><span class="o">.</span><span class="n">Normalize</span><span class="p">((</span><span class="mf">0.5</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">),</span> <span class="p">(</span><span class="mf">0.5</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">))</span>
    <span class="p">]</span>
<span class="p">)</span>
<span class="n">trainset</span> <span class="o">=</span> <span class="n">torchvision</span><span class="o">.</span><span class="n">datasets</span><span class="o">.</span><span class="n">CIFAR10</span><span class="p">(</span>
    <span class="n">root</span><span class="o">=</span><span class="s1">&#39;./data&#39;</span><span class="p">,</span>
    <span class="n">train</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
    <span class="n">download</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
    <span class="n">transform</span><span class="o">=</span><span class="n">transform</span>
<span class="p">)</span>
<span class="n">trainloader</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">utils</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">DataLoader</span><span class="p">(</span>
    <span class="n">trainset</span><span class="p">,</span> 
    <span class="n">batch_size</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span>
    <span class="n">shuffle</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
    <span class="n">num_workers</span><span class="o">=</span><span class="mi">2</span>
<span class="p">)</span>
<span class="n">testset</span> <span class="o">=</span> <span class="n">torchvision</span><span class="o">.</span><span class="n">datasets</span><span class="o">.</span><span class="n">CIFAR10</span><span class="p">(</span>
    <span class="n">root</span><span class="o">=</span><span class="s1">&#39;./data&#39;</span><span class="p">,</span>
    <span class="n">train</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
    <span class="n">download</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
    <span class="n">transform</span><span class="o">=</span><span class="n">transform</span>
<span class="p">)</span>
<span class="n">testloader</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">utils</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">DataLoader</span><span class="p">(</span>
    <span class="n">testset</span><span class="p">,</span> 
    <span class="n">batch_size</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span>
    <span class="n">shuffle</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
    <span class="n">num_workers</span><span class="o">=</span><span class="mi">2</span>
<span class="p">)</span>
<span class="n">classes</span> <span class="o">=</span> <span class="p">(</span>
    <span class="s2">&quot;plane&quot;</span><span class="p">,</span>
    <span class="s2">&quot;car&quot;</span><span class="p">,</span>
    <span class="s2">&quot;bird&quot;</span><span class="p">,</span>
    <span class="s2">&quot;cat&quot;</span><span class="p">,</span>
    <span class="s2">&quot;deer&quot;</span><span class="p">,</span>
    <span class="s2">&quot;dog&quot;</span><span class="p">,</span>
    <span class="s2">&quot;frog&quot;</span><span class="p">,</span>
    <span class="s2">&quot;horse&quot;</span><span class="p">,</span>
    <span class="s2">&quot;ship&quot;</span><span class="p">,</span>
    <span class="s2">&quot;truck&quot;</span>
<span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Downloading</span> <span class="n">https</span><span class="p">:</span><span class="o">//</span><span class="n">www</span><span class="o">.</span><span class="n">cs</span><span class="o">.</span><span class="n">toronto</span><span class="o">.</span><span class="n">edu</span><span class="o">/~</span><span class="n">kriz</span><span class="o">/</span><span class="n">cifar</span><span class="o">-</span><span class="mi">10</span><span class="o">-</span><span class="n">python</span><span class="o">.</span><span class="n">tar</span><span class="o">.</span><span class="n">gz</span> <span class="n">to</span> <span class="o">./</span><span class="n">data</span><span class="o">/</span><span class="n">cifar</span><span class="o">-</span><span class="mi">10</span><span class="o">-</span><span class="n">python</span><span class="o">.</span><span class="n">tar</span><span class="o">.</span><span class="n">gz</span>
<span class="n">Files</span> <span class="n">already</span> <span class="n">downloaded</span> <span class="ow">and</span> <span class="n">verified</span>
</code></pre></div>

<p>我们先来看下这些图片：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>

<span class="k">def</span> <span class="nf">imshow</span><span class="p">(</span><span class="n">img</span><span class="p">):</span>
    <span class="n">img</span> <span class="o">=</span> <span class="n">img</span> <span class="o">/</span> <span class="mi">2</span> <span class="o">+</span> <span class="mf">0.5</span>    <span class="c1"># 反归一化</span>
    <span class="n">npimg</span> <span class="o">=</span> <span class="n">img</span><span class="o">.</span><span class="n">numpy</span><span class="p">()</span>
    <span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">transpose</span><span class="p">(</span><span class="n">npimg</span><span class="p">,</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">0</span><span class="p">)))</span>
    <span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>

<span class="c1"># 随机获取一些训练图片</span>
<span class="n">dataiter</span> <span class="o">=</span> <span class="nb">iter</span><span class="p">(</span><span class="n">trainloader</span><span class="p">)</span>
<span class="n">images</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">dataiter</span><span class="o">.</span><span class="n">next</span><span class="p">()</span>

<span class="c1"># 展示图片</span>
<span class="n">imshow</span><span class="p">(</span><span class="n">torchvision</span><span class="o">.</span><span class="n">utils</span><span class="o">.</span><span class="n">make_grid</span><span class="p">(</span><span class="n">images</span><span class="p">))</span>
<span class="c1"># 打印标签</span>
<span class="nb">print</span><span class="p">(</span><span class="s2">&quot; &quot;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;%5s&quot;</span> <span class="o">%</span> <span class="n">classes</span><span class="p">[</span><span class="n">labels</span><span class="p">[</span><span class="n">j</span><span class="p">]]</span> <span class="k">for</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">4</span><span class="p">)))</span>
</code></pre></div>

<p><img alt="" src="https://pytorch.org/tutorials/_images/sphx_glr_cifar10_tutorial_001.png" /></p>
<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">deer</span>  <span class="n">frog</span>  <span class="n">bird</span>   <span class="n">car</span>
</code></pre></div>

<h3 id="2">2. 定义卷积神经网络<a class="headerlink" href="#2" title="Permanent link">&para;</a></h3>
<p>从上一节神经网络中把我们定义的神经网络复制过来，然后把单通道修改成3通道。</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="kn">import</span> <span class="nn">torch.nn.functional</span> <span class="k">as</span> <span class="nn">F</span>


<span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">6</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>  <span class="c1"># 单通道改成3通道</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pool</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">MaxPool2d</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">5</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">16</span> <span class="o">*</span> <span class="mi">5</span> <span class="o">*</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">120</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">120</span><span class="p">,</span> <span class="mi">84</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">84</span><span class="p">,</span> <span class="mi">10</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">pool</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">)))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">pool</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">)))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">16</span> <span class="o">*</span> <span class="mi">5</span> <span class="o">*</span> <span class="mi">5</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>


<span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
</code></pre></div>

<h3 id="3">3. 定义损失函数和优化器<a class="headerlink" href="#3" title="Permanent link">&para;</a></h3>
<p>我们使用分类交叉熵损失和momentum-SGD优化器：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch.optim</span> <span class="k">as</span> <span class="nn">optim</span>

<span class="n">criterion</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">CrossEntropyLoss</span><span class="p">()</span>
<span class="n">optimizer</span> <span class="o">=</span> <span class="n">optim</span><span class="o">.</span><span class="n">SGD</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">parameters</span><span class="p">(),</span> <span class="n">lr</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
</code></pre></div>

<h3 id="4">4. 训练神经网络<a class="headerlink" href="#4" title="Permanent link">&para;</a></h3>
<p>从现在开始，事情就变得有趣了。我们只需要遍历我们的数据迭代器，并将输入“喂”给网络和优化函数。</p>
<div class="codehilite"><pre><span></span><code><span class="k">for</span> <span class="n">epoch</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">2</span><span class="p">):</span>  <span class="c1"># 多次循环遍历数据集</span>
    <span class="n">running_loss</span> <span class="o">=</span> <span class="mf">0.0</span>
    <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">data</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">trainloader</span><span class="p">,</span> <span class="mi">0</span><span class="p">):</span>
        <span class="n">inputs</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">data</span>  <span class="c1"># 获取输入；data是一个列表：[inputs, labels]</span>
        <span class="n">optimizer</span><span class="o">.</span><span class="n">zero_grad</span><span class="p">()</span>  <span class="c1"># 参数梯度归零</span>

        <span class="c1"># forward + backward + optimize</span>
        <span class="n">outputs</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>
        <span class="n">loss</span> <span class="o">=</span> <span class="n">criterion</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>
        <span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
        <span class="n">optimizer</span><span class="o">.</span><span class="n">step</span><span class="p">()</span>

        <span class="c1"># 打印统计值</span>
        <span class="n">running_loss</span> <span class="o">+=</span> <span class="n">loss</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">i</span> <span class="o">%</span> <span class="mi">2000</span> <span class="o">==</span> <span class="mi">1999</span><span class="p">:</span>
            <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;[</span><span class="si">%d</span><span class="s1">, </span><span class="si">%5d</span><span class="s1">] loss: </span><span class="si">%.3f</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">epoch</span> <span class="o">+</span> <span class="mi">1</span><span class="p">,</span> <span class="n">i</span> <span class="o">+</span> <span class="mi">1</span><span class="p">,</span> <span class="n">running_loss</span> <span class="o">/</span> <span class="mi">2000</span><span class="p">))</span>
            <span class="n">running_loss</span> <span class="o">=</span> <span class="mf">0.0</span>

<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;Finished Training&#39;</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="p">[</span><span class="mi">1</span><span class="p">,</span>  <span class="mi">2000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">2.205</span>
<span class="p">[</span><span class="mi">1</span><span class="p">,</span>  <span class="mi">4000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.830</span>
<span class="p">[</span><span class="mi">1</span><span class="p">,</span>  <span class="mi">6000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.651</span>
<span class="p">[</span><span class="mi">1</span><span class="p">,</span>  <span class="mi">8000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.542</span>
<span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">10000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.487</span>
<span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">12000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.445</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span>  <span class="mi">2000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.379</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span>  <span class="mi">4000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.354</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span>  <span class="mi">6000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.304</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span>  <span class="mi">8000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.286</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">10000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.280</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">12000</span><span class="p">]</span> <span class="n">loss</span><span class="p">:</span> <span class="mf">1.240</span>
<span class="n">Finished</span> <span class="n">Training</span>
</code></pre></div>

<p>快速保存模型：</p>
<div class="codehilite"><pre><span></span><code><span class="n">PATH</span> <span class="o">=</span> <span class="s1">&#39;./cifar_net.pth&#39;</span>
<span class="n">torch</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">state_dict</span><span class="p">(),</span> <span class="n">PATH</span><span class="p">)</span>
</code></pre></div>

<p>更多关于模型保存的内容参看<a href="https://pytorch.org/docs/stable/notes/serialization.html">这里</a>。</p>
<h3 id="5">5. 在测试集上测试模型<a class="headerlink" href="#5" title="Permanent link">&para;</a></h3>
<p>我们已经在训练集上训练了2遍网络。但是我们需要检查网络是否学到了一些东西。</p>
<p>我们将通过预测神经网络输出的标签来检查这个问题，并和真实标签（ground-truth）进行对比。如果预测是正确的，我们将样本添加到正确预测的列表中。</p>
<p>第一步。让我们显示测试集中的图像来熟悉一下。</p>
<div class="codehilite"><pre><span></span><code><span class="n">dataiter</span> <span class="o">=</span> <span class="nb">iter</span><span class="p">(</span><span class="n">testloader</span><span class="p">)</span>
<span class="n">images</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">dataiter</span><span class="o">.</span><span class="n">next</span><span class="p">()</span>

<span class="c1"># 显示图片</span>
<span class="n">imshow</span><span class="p">(</span><span class="n">torchvision</span><span class="o">.</span><span class="n">utils</span><span class="o">.</span><span class="n">make_grid</span><span class="p">(</span><span class="n">images</span><span class="p">))</span>
<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;GroundTruth: &#39;</span><span class="p">,</span> <span class="s1">&#39; &#39;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="s1">&#39;</span><span class="si">%5s</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="n">classes</span><span class="p">[</span><span class="n">labels</span><span class="p">[</span><span class="n">j</span><span class="p">]]</span> <span class="k">for</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">4</span><span class="p">)))</span>
</code></pre></div>

<p><img alt="" src="https://pytorch.org/tutorials/_images/sphx_glr_cifar10_tutorial_002.png" /></p>
<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">GroundTruth</span><span class="p">:</span>    <span class="n">cat</span>  <span class="n">ship</span>  <span class="n">ship</span> <span class="n">plane</span>
</code></pre></div>

<p>下面我们重新载入保存的模型（注意：这里保存和预加载并非必要，我们只是展示相关操作）</p>
<div class="codehilite"><pre><span></span><code><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="n">net</span><span class="o">.</span><span class="n">load_state_dict</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">PATH</span><span class="p">))</span>
</code></pre></div>

<p>好了，下面让我们看下神经网络会认为上面的例子是什么：</p>
<div class="codehilite"><pre><span></span><code><span class="n">outputs</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">images</span><span class="p">)</span>
</code></pre></div>

<p>输出是10个类别的概率值。一个类的概率越高，网络就越认为这个图像属于这个特定的类。让我们得到最高概率值的下标/索引：</p>
<div class="codehilite"><pre><span></span><code><span class="n">_</span><span class="p">,</span> <span class="n">predicted</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>

<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;Predicted: &#39;</span><span class="p">,</span> <span class="s1">&#39; &#39;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="s1">&#39;</span><span class="si">%5s</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="n">classes</span><span class="p">[</span><span class="n">predicted</span><span class="p">[</span><span class="n">j</span><span class="p">]]</span> <span class="k">for</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">4</span><span class="p">)))</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Predicted</span><span class="p">:</span>    <span class="n">cat</span>  <span class="n">ship</span>  <span class="n">ship</span> <span class="n">plane</span>
</code></pre></div>

<p>结果看起来非常不错。</p>
<p>让我们看看模型在整个测试集上的表现如何：</p>
<div class="codehilite"><pre><span></span><code><span class="n">correct</span> <span class="o">=</span> <span class="mi">0</span>
<span class="n">total</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
    <span class="k">for</span> <span class="n">data</span> <span class="ow">in</span> <span class="n">testloader</span><span class="p">:</span>
        <span class="n">images</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">data</span>
        <span class="n">outputs</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">images</span><span class="p">)</span>
        <span class="n">_</span><span class="p">,</span> <span class="n">predicted</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="n">outputs</span><span class="o">.</span><span class="n">data</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="n">total</span> <span class="o">+=</span> <span class="n">labels</span><span class="o">.</span><span class="n">size</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
        <span class="n">correct</span> <span class="o">+=</span> <span class="p">(</span><span class="n">predicted</span> <span class="o">==</span> <span class="n">labels</span><span class="p">)</span><span class="o">.</span><span class="n">sum</span><span class="p">()</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>

<span class="nb">print</span><span class="p">(</span><span class="s1">&#39;Accuracy of the network on the 10000 test images: </span><span class="si">%d</span><span class="s1"> </span><span class="si">%%</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="p">(</span><span class="mi">100</span> <span class="o">*</span> <span class="n">correct</span> <span class="o">/</span> <span class="n">total</span><span class="p">))</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Accuracy</span> <span class="n">of</span> <span class="n">the</span> <span class="n">network</span> <span class="n">on</span> <span class="n">the</span> <span class="mi">10000</span> <span class="n">test</span> <span class="n">images</span><span class="p">:</span> <span class="mi">56</span> <span class="o">%</span>
</code></pre></div>

<p>看起来还不错，至少比随机挑选（从10个类别中随机选一个的正确率是10%）的结果要好得多。说明模型确实学到了一些东西。</p>
<p>那么模型在哪些类上表现的好？哪些类上表现的较差呢？</p>
<div class="codehilite"><pre><span></span><code><span class="n">class_correct</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="mf">0.</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">10</span><span class="p">))</span>
<span class="n">class_total</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="mf">0.</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">10</span><span class="p">))</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
    <span class="k">for</span> <span class="n">data</span> <span class="ow">in</span> <span class="n">testloader</span><span class="p">:</span>
        <span class="n">images</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">data</span>
        <span class="n">outputs</span> <span class="o">=</span> <span class="n">net</span><span class="p">(</span><span class="n">images</span><span class="p">)</span>
        <span class="n">_</span><span class="p">,</span> <span class="n">predicted</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="n">c</span> <span class="o">=</span> <span class="p">(</span><span class="n">predicted</span> <span class="o">==</span> <span class="n">labels</span><span class="p">)</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">4</span><span class="p">):</span>
            <span class="n">label</span> <span class="o">=</span> <span class="n">labels</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
            <span class="n">class_correct</span><span class="p">[</span><span class="n">label</span><span class="p">]</span> <span class="o">+=</span> <span class="n">c</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">item</span><span class="p">()</span>
            <span class="n">class_total</span><span class="p">[</span><span class="n">label</span><span class="p">]</span> <span class="o">+=</span> <span class="mi">1</span>

<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">10</span><span class="p">):</span>
    <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;Accuracy of </span><span class="si">%5s</span><span class="s1"> : </span><span class="si">%2d</span><span class="s1"> </span><span class="si">%%</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">classes</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="mi">100</span> <span class="o">*</span> <span class="n">class_correct</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">/</span> <span class="n">class_total</span><span class="p">[</span><span class="n">i</span><span class="p">]))</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Accuracy</span> <span class="n">of</span> <span class="n">plane</span> <span class="p">:</span> <span class="mi">73</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>   <span class="n">car</span> <span class="p">:</span> <span class="mi">67</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>  <span class="n">bird</span> <span class="p">:</span> <span class="mi">45</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>   <span class="n">cat</span> <span class="p">:</span> <span class="mi">36</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>  <span class="n">deer</span> <span class="p">:</span> <span class="mi">44</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>   <span class="n">dog</span> <span class="p">:</span> <span class="mi">59</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>  <span class="n">frog</span> <span class="p">:</span> <span class="mi">64</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span> <span class="n">horse</span> <span class="p">:</span> <span class="mi">60</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span>  <span class="n">ship</span> <span class="p">:</span> <span class="mi">58</span> <span class="o">%</span>
<span class="n">Accuracy</span> <span class="n">of</span> <span class="n">truck</span> <span class="p">:</span> <span class="mi">57</span> <span class="o">%</span>
</code></pre></div>

<p>好的，下面我们干什么？</p>
<p>怎样使神经网络在GPU上运行？</p>
<h3 id="6-gpu">6. 在GPU上训练<a class="headerlink" href="#6-gpu" title="Permanent link">&para;</a></h3>
<p>与将一个张量传递给GPU一样，可以这样将神经网络转移到GPU上。</p>
<p>如果我们有cuda可用的话，让我们首先定义第一个设备为可见cuda设备：</p>
<div class="codehilite"><pre><span></span><code><span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cuda:0&#39;</span> <span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">()</span> <span class="k">else</span> <span class="s1">&#39;cpu&#39;</span><span class="p">)</span>

<span class="c1"># 假设我们有 CUDA 设备，下面应该会打印出来 CUDA 设备</span>
<span class="nb">print</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">cuda</span><span class="p">:</span><span class="mi">0</span>
</code></pre></div>

<p>后面的部分都假设 <code>device</code> 是 CUDA 设备。</p>
<p>然后这些方法将递归遍历所有模块，并将它们的参数和缓冲区转换为CUDA张量：</p>
<div class="codehilite"><pre><span></span><code><span class="n">net</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
</code></pre></div>

<p>记住，你必须在每一步都要将输入和目标发送给GPU：</p>
<div class="codehilite"><pre><span></span><code><span class="n">inputs</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="n">data</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span> <span class="n">data</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
</code></pre></div>

<p>我们为什么感受不到与CPU相比的巨大加速？因为我们的网络实在是太小了。</p>
<p>尝试一下：加宽你的网络（注意第一个<code>nn.Conv2d</code>的第二个参数和第二个<code>nn.Conv2d</code>的第一个参数要相同），看看能获得多少加速。</p>
<p>已实现的目标：</p>
<ul>
<li>在更高层次上理解PyTorch的Tensor库和神经网络</li>
<li>训练一个小的神经网络做图片分类</li>
</ul>
<p>如果你想在多 GPU 上训练以获得更大的加速，请看下一节：数据并行。</p>
<h2 id="gpu">数据并行（多GPU训练）<a class="headerlink" href="#gpu" title="Permanent link">&para;</a></h2>
<p>这部分我们会学习到怎样用 <code>DataParallel</code> 实现多 GPU 训练。</p>
<p>PyTorch 使用 GPU 训练是非常简单的。你可以把模型放到 GPU 上：</p>
<div class="codehilite"><pre><span></span><code><span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">&quot;cuda:0&quot;</span><span class="p">)</span>
<span class="n">model</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
</code></pre></div>

<p>然后，复制所有张量到 GPU 上。</p>
<div class="codehilite"><pre><span></span><code><span class="n">mytensor</span> <span class="o">=</span> <span class="n">my_tensor</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
</code></pre></div>

<div class='container' style='margin-top:40px;margin-bottom:20px;'>
    <div style='background-color:#54c7ec;height:36px;line-height:36px;vertical-align:middle;'>
        <div style='margin-left:10px'>
            <font color='white' size=4>
                • 注意
            </font>
        </div>
    </div>
    <div style='background-color:#F3F4F7'>
        <div style='padding:15px 10px 15px 20px;line-height:1.5;'>
            调用<code>my_tensor.to(device)</code>在 GPU 上返回一个新的<code>my_tensor</code>备份，而不是重写。因此，你需要将它赋值给 GPU 上的一个新的张量。
        </div>    
    </div>    
</div>

<p>在多GPU上执行前向传播和反向传播是自然而然的事。然而，PyTorch 默认只使用一个 GPU。你可以使用<code>DataParallel</code> 让模型并行运行来让你的操作在多个GPU上运行。</p>
<p>这是这篇教程的核心内容，下面我们详细介绍它。</p>
<h3 id="_12">导入和参数<a class="headerlink" href="#_12" title="Permanent link">&para;</a></h3>
<p>导入 PyTorch 模块 以及定义参数：</p>
<div class="codehilite"><pre><span></span><code><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="kn">from</span> <span class="nn">torch.utils.data</span> <span class="kn">import</span> <span class="n">Dataset</span><span class="p">,</span> <span class="n">DataLoader</span>

<span class="c1"># 参数和DataLoader</span>
<span class="n">input_size</span> <span class="o">=</span> <span class="mi">5</span>
<span class="n">output_size</span> <span class="o">=</span> <span class="mi">2</span>

<span class="n">batch_size</span> <span class="o">=</span> <span class="mi">30</span>
<span class="n">data_size</span> <span class="o">=</span> <span class="mi">100</span>
</code></pre></div>

<p>设备：</p>
<div class="codehilite"><pre><span></span><code><span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">&quot;cuda:0&quot;</span> <span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">()</span> <span class="k">else</span> <span class="s2">&quot;cpu&quot;</span><span class="p">)</span>
</code></pre></div>

<h3 id="_13">虚拟数据集<a class="headerlink" href="#_13" title="Permanent link">&para;</a></h3>
<p>创建一个随机的虚拟数据集。你只需要实现 <code>__getitem__</code></p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">RandomDataset</span><span class="p">(</span><span class="n">Dataset</span><span class="p">):</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">length</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">len</span> <span class="o">=</span> <span class="n">length</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">data</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="n">length</span><span class="p">,</span> <span class="n">size</span><span class="p">)</span>

    <span class="k">def</span> <span class="fm">__getitem__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">data</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__len__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">len</span>

<span class="n">rand_loader</span> <span class="o">=</span> <span class="n">DataLoader</span><span class="p">(</span>
    <span class="n">dataset</span><span class="o">=</span><span class="n">RandomDataset</span><span class="p">(</span><span class="n">input_size</span><span class="p">,</span> <span class="n">data_size</span><span class="p">),</span>
    <span class="n">batch_size</span><span class="o">=</span><span class="n">batch_size</span><span class="p">,</span>
    <span class="n">shuffle</span><span class="o">=</span><span class="kc">True</span>
<span class="p">)</span>
</code></pre></div>

<h3 id="_14">简单的模型<a class="headerlink" href="#_14" title="Permanent link">&para;</a></h3>
<p>作为演示，我们的模型只接收一个输入，执行一个线性操作，然后得到结果。然而，你能在任何模型（CNN，RNN，Capsule Net等）上使用<code>DataParallel</code>。</p>
<p>我们在模型内部放置了一条打印语句来检测输入和输出向量的大小。请注意批等级为0时打印的内容。</p>
<div class="codehilite"><pre><span></span><code><span class="k">class</span> <span class="nc">Model</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">):</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_size</span><span class="p">,</span> <span class="n">output_size</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Model</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fc</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="n">input_size</span><span class="p">,</span> <span class="n">output_size</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inputs</span><span class="p">):</span>
        <span class="n">outputs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>
        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;</span><span class="se">\t</span><span class="s2">In Model: input size&quot;</span><span class="p">,</span> <span class="nb">input</span><span class="o">.</span><span class="n">size</span><span class="p">(),</span>
              <span class="s2">&quot;output size&quot;</span><span class="p">,</span> <span class="n">output</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>

        <span class="k">return</span> <span class="n">output</span>
</code></pre></div>

<h3 id="dataparallel">创建模型和DataParallel<a class="headerlink" href="#dataparallel" title="Permanent link">&para;</a></h3>
<p>这是本教程的核心部分。首先，我们需要创建一个模型实例和检测我们是否有多个GPU。如果我们有多个GPU，我们使用<code>nn.DataParallel</code>来包装我们的模型。然后通过<code>model.to(device)</code>把模型放到GPU上。</p>
<div class="codehilite"><pre><span></span><code><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">input_size</span><span class="p">,</span> <span class="n">output_size</span><span class="p">)</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">device_count</span><span class="p">()</span> <span class="o">&gt;</span> <span class="mi">1</span><span class="p">:</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Let&#39;s use&quot;</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">device_count</span><span class="p">(),</span> <span class="s2">&quot;GPUs!&quot;</span><span class="p">)</span>
    <span class="c1"># dim = 0 [30, xxx] -&gt; [10, ...], [10, ...], [10, ...] on 3 GPUs</span>
    <span class="n">model</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">DataParallel</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>

<span class="n">model</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">Let</span><span class="s1">&#39;s use 2 GPUs!</span>
</code></pre></div>

<h3 id="_15">运行模型<a class="headerlink" href="#_15" title="Permanent link">&para;</a></h3>
<p>现在我们可以看到输入和输出张量的尺寸了。</p>
<div class="codehilite"><pre><span></span><code><span class="k">for</span> <span class="n">data</span> <span class="ow">in</span> <span class="n">rand_loader</span><span class="p">:</span>
    <span class="n">inputs</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
    <span class="n">outputs</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">inputs</span><span class="p">)</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;Outside: input size&quot;</span><span class="p">,</span> <span class="n">inputs</span><span class="o">.</span><span class="n">size</span><span class="p">(),</span>
          <span class="s2">&quot;output_size&quot;</span><span class="p">,</span> <span class="n">outputs</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
</code></pre></div>

<p>输出：</p>
<div class="codehilite"><pre><span></span><code><span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
        <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
</code></pre></div>

<h3 id="_16">结果<a class="headerlink" href="#_16" title="Permanent link">&para;</a></h3>
<p>当你没有 GPU 或者只有一个 GPU 时，我们对30个输入和输出进行批处理时，我们和期望的一样得到30个输入和30个输出，但是若有多个GPU，会得到如下的结果。</p>
<ul>
<li>2 个 GPU：</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="c1"># on 2 GPUs</span>
<span class="c1"># Let&#39;s use 2 GPUs!</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">15</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">5</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
</code></pre></div>

<ul>
<li>3 个 GPU</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="c1"># Let&#39;s use 3 GPUs!</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
</code></pre></div>

<ul>
<li>8 个 GPU</li>
</ul>
<div class="codehilite"><pre><span></span><code><span class="c1"># Let&#39;s use 8 GPUs!</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">30</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
    <span class="n">In</span> <span class="n">Model</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
<span class="n">Outside</span><span class="p">:</span> <span class="nb">input</span> <span class="n">size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span> <span class="n">output_size</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">10</span><span class="p">,</span> <span class="mi">2</span><span class="p">])</span>
</code></pre></div>

<h3 id="_17">总结<a class="headerlink" href="#_17" title="Permanent link">&para;</a></h3>
<p><code>DataParallel</code>自动的划分数据，并将任务分发到多个 GPU 上的多个模型。<code>DataParallel</code>会在每个模型完成任务后，收集与合并结果然后返回给你。</p>
<p>更多信息，请参考: <a href="https://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html">https://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html</a></p>
<h2 id="_18">更多资料和学习资源<a class="headerlink" href="#_18" title="Permanent link">&para;</a></h2>
<ul>
<li><a href="https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html">训练神经网络玩游戏</a></li>
<li><a href="https://github.com/pytorch/examples/tree/master/imagenet">在ImageNet上训练最佳的ResNet</a></li>
<li><a href="https://github.com/pytorch/examples/tree/master/dcgan">用生成对抗网络训练人脸生成器</a></li>
<li><a href="https://github.com/pytorch/examples/tree/master/word_language_model">用 LSTM 网络训练词级别的语言模型</a></li>
<li><a href="https://github.com/pytorch/examples">更多实例</a></li>
<li><a href="https://github.com/pytorch/tutorials">更多教程</a></li>
<li><a href="https://discuss.pytorch.org/">PyTorch 论坛</a></li>
<li><a href="https://pytorch.slack.com/messages/beginner/">在Slack上与其他用户交流</a></li>
</ul>
              
            </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="../faq/" class="btn btn-neutral float-right" title="PyTorch FAQ">Next <span class="icon icon-circle-arrow-right"></span></a>
      
      
        <a href=".." class="btn btn-neutral" title="主页"><span class="icon icon-circle-arrow-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <!-- Copyright etc -->
    
      <p>©2020 Rogerspy. All rights reserved.</p>
    
  </div>

  Built with <a href="https://www.mkdocs.org/">MkDocs</a> using a <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
      
        </div>
      </div>

    </section>

  </div>

  <div class="rst-versions" role="note" aria-label="versions">
    <span class="rst-current-version" data-toggle="rst-current-version">
      
      
        <span><a href=".." style="color: #fcfcfc;">&laquo; Previous</a></span>
      
      
        <span style="margin-left: 15px"><a href="../faq/" style="color: #fcfcfc">Next &raquo;</a></span>
      
    </span>
</div>
    <script>var base_url = '..';</script>
    <script src="../js/theme.js" defer></script>
      <script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML" defer></script>
      <script src="../search/main.js" defer></script>
    <script defer>
        window.onload = function () {
            SphinxRtdTheme.Navigation.enable(true);
        };
    </script>

</body>
</html>
