


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>torch.nn.modules.activation &mdash; PyTorch master documentation</title>

  <link rel="canonical" href="https://pytorch.org/docs/stable/_modules/torch/nn/modules/activation.html">

  <!-- Stylesheets. NOTE(review): the page previously loaded BOTH
       katex@0.10.0-beta and katex@0.11.1 stylesheets; only 0.11.1 (the one
       that won the cascade, loaded last) is kept. Redundant type="text/css"
       attributes and a commented-out pygments.css link were removed. -->
  <link rel="stylesheet" href="../../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../../_static/css/jit.css">
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css">
  <link rel="stylesheet" href="../../../../_static/katex-math.css">
  <link rel="index" title="Index" href="../../../../genindex.html">
  <link rel="search" title="Search" href="../../../../search.html">

  <script src="../../../../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts (crossorigin is required on as="font" preloads
       or the preloaded response cannot be reused by the font fetch). -->
  <link rel="preload" href="../../../../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="../../../../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

  <!-- Preload the KaTeX fonts. NOTE(review): these previously pointed at
       katex@0.10.0 while the stylesheet is 0.11.1, so every preload URL
       mismatched the fonts the CSS actually requests and the preloads were
       wasted; the version now matches the stylesheet. -->
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<body class="pytorch-body">

  <!-- Site header. NOTE(review): this block previously appeared between
       </head> and <body>, where no content is allowed (browsers reparent it
       into <body>); it now opens the body instead. Also fixed: stray doubled
       quotes in two href attributes and unquoted class attribute values. -->
  <div class="container-fluid header-holder tutorials-header" id="header-holder">
    <div class="container">
      <div class="header-container">
        <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

        <div class="main-menu">
          <ul>
            <li>
              <a href="https://pytorch.org/get-started">Get Started</a>
            </li>

            <li>
              <div class="ecosystem-dropdown">
                <!-- NOTE(review): href-less <a> toggles are driven by theme JS
                     via data-toggle; kept as anchors so the theme's selectors
                     keep working, though a <button> would be more accessible. -->
                <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                  Ecosystem
                </a>
                <div class="ecosystem-dropdown-menu">
                  <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                    <span class="dropdown-title">Models (Beta)</span>
                    <p>Discover, publish, and reuse pre-trained models</p>
                  </a>
                  <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                    <span class="dropdown-title">Tools &amp; Libraries</span>
                    <p>Explore the ecosystem of tools and libraries</p>
                  </a>
                </div>
              </div>
            </li>

            <li>
              <a href="https://pytorch.org/mobile">Mobile</a>
            </li>

            <li>
              <a href="https://pytorch.org/blog/">Blog</a>
            </li>

            <li>
              <a href="https://pytorch.org/tutorials">Tutorials</a>
            </li>

            <li class="active">
              <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
            </li>

            <li>
              <div class="resources-dropdown">
                <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                  Resources
                </a>
                <div class="resources-dropdown-menu">
                  <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                    <span class="dropdown-title">Developer Resources</span>
                    <p>Find resources and get questions answered</p>
                  </a>
                  <a class="nav-dropdown-item" href="https://pytorch.org/features">
                    <span class="dropdown-title">About</span>
                    <p>Learn about PyTorch’s features and capabilities</p>
                  </a>
                </div>
              </div>
            </li>

            <li>
              <a href="https://github.com/pytorch/pytorch">Github</a>
            </li>
          </ul>
        </div>

        <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
      </div>
    </div>
  </div>

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">
            <!-- was: "master (1.5.0 )" — stray space before the paren removed -->
            <div class="version">
              master (1.5.0)
            </div>

            <div role="search">
              <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
                <!-- NOTE(review): the text input had no visible label;
                     aria-label added so the search box has an accessible name. -->
                <input type="text" name="q" placeholder="Search Docs" aria-label="Search Docs">
                <input type="hidden" name="check_keywords" value="yes">
                <input type="hidden" name="area" value="default">
              </form>
            </div>
          </div>

          <div>
            <a style="color:#F05732" href="https://pytorch.org/docs/stable/_modules/torch/nn/modules/activation.html">
              You are viewing unstable developer preview docs.
              Click here to view docs for latest stable release.
            </a>
          </div>

<p class="caption"><span class="caption-text">Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/cuda.html">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../notes/windows.html">Windows FAQ</a></li>
</ul>
<p class="caption"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../packages.html">Javadoc</a></li>
</ul>
<p class="caption"><span class="caption-text">Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../amp.html">torch.cuda.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../optim.html">torch.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../rpc/index.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../__config__.html">torch.__config__</a></li>
</ul>
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<!-- was: http://pytorch.org/xla/ — upgraded to https -->
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          















<div role="navigation" aria-label="breadcrumbs navigation">
  <!-- Breadcrumb trail: Docs > Module code > torch > current page. -->
  <ul class="pytorch-breadcrumbs">
    <li>
      <a href="../../../../index.html">Docs</a> &gt;
    </li>
    <li><a href="../../../index.html">Module code</a> &gt;</li>
    <li><a href="../../../torch.html">torch</a> &gt;</li>
    <li>torch.nn.modules.activation</li>
    <li class="pytorch-breadcrumbs-aside"></li>
  </ul>
</div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <h1>Source code for torch.nn.modules.activation</h1><div class="highlight"><pre>
<span></span><span class="kn">import</span> <span class="nn">warnings</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">from</span> <span class="nn">.</span> <span class="kn">import</span> <span class="n">Linear</span>
<span class="kn">from</span> <span class="nn">torch.nn.init</span> <span class="kn">import</span> <span class="n">xavier_uniform_</span>
<span class="kn">from</span> <span class="nn">torch.nn.init</span> <span class="kn">import</span> <span class="n">constant_</span>
<span class="kn">from</span> <span class="nn">torch.nn.init</span> <span class="kn">import</span> <span class="n">xavier_normal_</span>
<span class="kn">from</span> <span class="nn">torch.nn.parameter</span> <span class="kn">import</span> <span class="n">Parameter</span>
<span class="kn">from</span> <span class="nn">.module</span> <span class="kn">import</span> <span class="n">Module</span>
<span class="kn">from</span> <span class="nn">..</span> <span class="kn">import</span> <span class="n">functional</span> <span class="k">as</span> <span class="n">F</span>


<div class="viewcode-block" id="Threshold"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Threshold">[docs]</a><span class="k">class</span> <span class="nc">Threshold</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Thresholds each element of the input Tensor.</span>

<span class="sd">    Threshold is defined as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        y =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">        x, &amp;\text{ if } x &gt; \text{threshold} \\</span>
<span class="sd">        \text{value}, &amp;\text{ otherwise }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        threshold: The value to threshold at</span>
<span class="sd">        value: The value to replace with</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Threshold(0.1, 20)</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;threshold&#39;</span><span class="p">,</span> <span class="s1">&#39;value&#39;</span><span class="p">,</span> <span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">threshold</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Threshold</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">threshold</span> <span class="o">=</span> <span class="n">threshold</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">value</span> <span class="o">=</span> <span class="n">value</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>
        <span class="c1"># TODO: check in THNN (if inplace == True, then assert value &lt;= threshold)</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">threshold</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">threshold</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">value</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;, inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="s1">&#39;threshold=</span><span class="si">{}</span><span class="s1">, value=</span><span class="si">{}{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">threshold</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">value</span><span class="p">,</span> <span class="n">inplace_str</span>
        <span class="p">)</span></div>


<div class="viewcode-block" id="ReLU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.ReLU">[docs]</a><span class="k">class</span> <span class="nc">ReLU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the rectified linear unit function element-wise:</span>

<span class="sd">    :math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`</span>

<span class="sd">    Args:</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/ReLU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.ReLU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>


<span class="sd">      An implementation of CReLU - https://arxiv.org/abs/1603.05201</span>

<span class="sd">        &gt;&gt;&gt; m = nn.ReLU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2).unsqueeze(0)</span>
<span class="sd">        &gt;&gt;&gt; output = torch.cat((m(input),m(-input)))</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">ReLU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="n">inplace_str</span></div>


<div class="viewcode-block" id="RReLU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.RReLU">[docs]</a><span class="k">class</span> <span class="nc">RReLU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the randomized leaky rectified liner unit function, element-wise,</span>
<span class="sd">    as described in the paper:</span>

<span class="sd">    `Empirical Evaluation of Rectified Activations in Convolutional Network`_.</span>

<span class="sd">    The function is defined as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{RReLU}(x) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">            x &amp; \text{if } x \geq 0 \\</span>
<span class="sd">            ax &amp; \text{ otherwise }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    where :math:`a` is randomly sampled from uniform distribution</span>
<span class="sd">    :math:`\mathcal{U}(\text{lower}, \text{upper})`.</span>

<span class="sd">     See: https://arxiv.org/pdf/1505.00853.pdf</span>

<span class="sd">    Args:</span>
<span class="sd">        lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`</span>
<span class="sd">        upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.RReLU(0.1, 0.3)</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>

<span class="sd">    .. _`Empirical Evaluation of Rectified Activations in Convolutional Network`:</span>
<span class="sd">        https://arxiv.org/abs/1505.00853</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;lower&#39;</span><span class="p">,</span> <span class="s1">&#39;upper&#39;</span><span class="p">,</span> <span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lower</span><span class="o">=</span><span class="mf">1.</span> <span class="o">/</span> <span class="mi">8</span><span class="p">,</span> <span class="n">upper</span><span class="o">=</span><span class="mf">1.</span> <span class="o">/</span> <span class="mi">3</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">RReLU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">lower</span> <span class="o">=</span> <span class="n">lower</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">upper</span> <span class="o">=</span> <span class="n">upper</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">rrelu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;, inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="s1">&#39;lower=</span><span class="si">{}</span><span class="s1">, upper=</span><span class="si">{}{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">lower</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper</span><span class="p">,</span> <span class="n">inplace_str</span><span class="p">)</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.Hardtanh; edit torch/nn/modules/activation.py, not this file. Upstream issues visible in the rendered source: the deprecation warnings read "rename to" (should be "renamed to"), and argument validation uses a bare assert, which is stripped under python -O (a ValueError raise would be safer). --><div class="viewcode-block" id="Hardtanh"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Hardtanh">[docs]</a><span class="k">class</span> <span class="nc">Hardtanh</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the HardTanh function element-wise</span>

<span class="sd">    HardTanh is defined as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{HardTanh}(x) = \begin{cases}</span>
<span class="sd">            1 &amp; \text{ if } x &gt; 1 \\</span>
<span class="sd">            -1 &amp; \text{ if } x &lt; -1 \\</span>
<span class="sd">            x &amp; \text{ otherwise } \\</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    The range of the linear region :math:`[-1, 1]` can be adjusted using</span>
<span class="sd">    :attr:`min_val` and :attr:`max_val`.</span>

<span class="sd">    Args:</span>
<span class="sd">        min_val: minimum value of the linear region range. Default: -1</span>
<span class="sd">        max_val: maximum value of the linear region range. Default: 1</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Keyword arguments :attr:`min_value` and :attr:`max_value`</span>
<span class="sd">    have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Hardtanh.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Hardtanh(-2, 2)</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;min_val&#39;</span><span class="p">,</span> <span class="s1">&#39;max_val&#39;</span><span class="p">,</span> <span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">min_val</span><span class="o">=-</span><span class="mf">1.</span><span class="p">,</span> <span class="n">max_val</span><span class="o">=</span><span class="mf">1.</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">min_value</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">max_value</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Hardtanh</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">min_value</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;keyword argument min_value is deprecated and rename to min_val&quot;</span><span class="p">)</span>
            <span class="n">min_val</span> <span class="o">=</span> <span class="n">min_value</span>
        <span class="k">if</span> <span class="n">max_value</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;keyword argument max_value is deprecated and rename to max_val&quot;</span><span class="p">)</span>
            <span class="n">max_val</span> <span class="o">=</span> <span class="n">max_value</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">min_val</span> <span class="o">=</span> <span class="n">min_val</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">max_val</span> <span class="o">=</span> <span class="n">max_val</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>
        <span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_val</span> <span class="o">&gt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">min_val</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">hardtanh</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">min_val</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_val</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;, inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="s1">&#39;min_val=</span><span class="si">{}</span><span class="s1">, max_val=</span><span class="si">{}{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">min_val</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_val</span><span class="p">,</span> <span class="n">inplace_str</span>
        <span class="p">)</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.ReLU6 (Hardtanh fixed to the range 0..6); edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="ReLU6"><a class="viewcode-back" href="../../../../nn.html#torch.nn.ReLU6">[docs]</a><span class="k">class</span> <span class="nc">ReLU6</span><span class="p">(</span><span class="n">Hardtanh</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{ReLU6}(x) = \min(\max(0,x), 6)</span>

<span class="sd">    Args:</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/ReLU6.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.ReLU6()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">ReLU6</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="mf">0.</span><span class="p">,</span> <span class="mf">6.</span><span class="p">,</span> <span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="n">inplace_str</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.Sigmoid; edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="Sigmoid"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Sigmoid">[docs]</a><span class="k">class</span> <span class="nc">Sigmoid</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}</span>


<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Sigmoid.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Sigmoid()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">sigmoid</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span></div>


<!-- NOTE(review): generated rendering of torch.nn.Hardsigmoid. Unlike sibling classes it has no viewcode-block div or [docs] backlink, consistent with it not being cross-referenced from nn.html in this docs build. Edit torch/nn/modules/activation.py, not this file. --><span class="k">class</span> <span class="nc">Hardsigmoid</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Hardsigmoid}(x) = \frac{ReLU6(x + 3)}{6}</span>


<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Hardsigmoid()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">hardsigmoid</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.Tanh; edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="Tanh"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Tanh">[docs]</a><span class="k">class</span> <span class="nc">Tanh</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Tanh.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Tanh()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">tanh</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.ELU; edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="ELU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.ELU">[docs]</a><span class="k">class</span> <span class="nc">ELU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))</span>

<span class="sd">    Args:</span>
<span class="sd">        alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/ELU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.ELU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">alpha</span><span class="o">=</span><span class="mf">1.</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">ELU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">alpha</span> <span class="o">=</span> <span class="n">alpha</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">elu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">alpha</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;, inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="s1">&#39;alpha=</span><span class="si">{}{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">alpha</span><span class="p">,</span> <span class="n">inplace_str</span><span class="p">)</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.CELU; edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="CELU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.CELU">[docs]</a><span class="k">class</span> <span class="nc">CELU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))</span>

<span class="sd">    More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .</span>

<span class="sd">    Args:</span>
<span class="sd">        alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/CELU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.CELU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>

<span class="sd">    .. _`Continuously Differentiable Exponential Linear Units`:</span>
<span class="sd">        https://arxiv.org/abs/1704.07483</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">alpha</span><span class="o">=</span><span class="mf">1.</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">CELU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">alpha</span> <span class="o">=</span> <span class="n">alpha</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">celu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">alpha</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;, inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="s1">&#39;alpha=</span><span class="si">{}{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">alpha</span><span class="p">,</span> <span class="n">inplace_str</span><span class="p">)</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.SELU; edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="SELU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.SELU">[docs]</a><span class="k">class</span> <span class="nc">SELU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applied element-wise, as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))</span>

<span class="sd">    with :math:`\alpha = 1.6732632423543772848170429916717` and</span>
<span class="sd">    :math:`\text{scale} = 1.0507009873554804934193349852946`.</span>

<span class="sd">    More details can be found in the paper `Self-Normalizing Neural Networks`_ .</span>

<span class="sd">    Args:</span>
<span class="sd">        inplace (bool, optional): can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/SELU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.SELU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>

<span class="sd">    .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;inplace&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">SELU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">selu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="n">inplace_str</span></div>


<!-- NOTE(review): generated rendering of torch.nn.GLU. Like Hardsigmoid it lacks a viewcode-block div and [docs] backlink in this docs build. Edit torch/nn/modules/activation.py, not this file. --><span class="k">class</span> <span class="nc">GLU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the gated linear unit function</span>
<span class="sd">    :math:`{GLU}(a, b)= a \otimes \sigma(b)` where :math:`a` is the first half</span>
<span class="sd">    of the input matrices and :math:`b` is the second half.</span>

<span class="sd">    Args:</span>
<span class="sd">        dim (int): the dimension on which to split the input. Default: -1</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.GLU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(4, 2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;dim&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dim</span><span class="o">=-</span><span class="mi">1</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">GLU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dim</span> <span class="o">=</span> <span class="n">dim</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">glu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="s1">&#39;dim=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">)</span>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.GELU; edit torch/nn/modules/activation.py, not this file. --><div class="viewcode-block" id="GELU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.GELU">[docs]</a><span class="k">class</span> <span class="nc">GELU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the Gaussian Error Linear Units function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{GELU}(x) = x * \Phi(x)</span>
<span class="sd">    where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/GELU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.GELU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">gelu</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span></div>


<!-- NOTE(review): generated Sphinx/pygments rendering of torch.nn.Hardshrink; edit torch/nn/modules/activation.py, not this file. Upstream consistency nit visible here: extra_repr formats the bare value ('{}'.format(self.lambd)) while sibling modules label their fields (e.g. 'alpha={}'). --><div class="viewcode-block" id="Hardshrink"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Hardshrink">[docs]</a><span class="k">class</span> <span class="nc">Hardshrink</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the hard shrinkage function element-wise:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{HardShrink}(x) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">        x, &amp; \text{ if } x &gt; \lambda \\</span>
<span class="sd">        x, &amp; \text{ if } x &lt; -\lambda \\</span>
<span class="sd">        0, &amp; \text{ otherwise }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Hardshrink.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Hardshrink()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;lambd&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lambd</span><span class="o">=</span><span class="mf">0.5</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Hardshrink</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">lambd</span> <span class="o">=</span> <span class="n">lambd</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">hardshrink</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">lambd</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="s1">&#39;</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">lambd</span><span class="p">)</span></div>


<!-- Generated viewcode markup (Pygments-highlighted Python) for torch.nn.LeakyReLU.
     Fix: the second math block previously rendered "\text{LeakyRELU}", inconsistent
     with the class name and the first formula; corrected to "\text{LeakyReLU}". -->
<div class="viewcode-block" id="LeakyReLU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.LeakyReLU">[docs]</a><span class="k">class</span> <span class="nc">LeakyReLU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)</span>


<span class="sd">    or</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{LeakyReLU}(x) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">        x, &amp; \text{ if } x \geq 0 \\</span>
<span class="sd">        \text{negative\_slope} \times x, &amp; \text{ otherwise }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        negative_slope: Controls the angle of the negative slope. Default: 1e-2</span>
<span class="sd">        inplace: can optionally do the operation in-place. Default: ``False``</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/LeakyReLU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.LeakyReLU(0.1)</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;inplace&#39;</span><span class="p">,</span> <span class="s1">&#39;negative_slope&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">negative_slope</span><span class="o">=</span><span class="mf">1e-2</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">LeakyReLU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">negative_slope</span> <span class="o">=</span> <span class="n">negative_slope</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="o">=</span> <span class="n">inplace</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">leaky_relu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">negative_slope</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">inplace_str</span> <span class="o">=</span> <span class="s1">&#39;, inplace=True&#39;</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">inplace</span> <span class="k">else</span> <span class="s1">&#39;&#39;</span>
        <span class="k">return</span> <span class="s1">&#39;negative_slope=</span><span class="si">{}{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">negative_slope</span><span class="p">,</span> <span class="n">inplace_str</span><span class="p">)</span></div>


<!-- Generated viewcode markup (Pygments-highlighted Python source) for torch.nn.LogSigmoid.
     NOTE(review): auto-generated by Sphinx; content fixes belong in the upstream .py docstring. -->
<div class="viewcode-block" id="LogSigmoid"><a class="viewcode-back" href="../../../../nn.html#torch.nn.LogSigmoid">[docs]</a><span class="k">class</span> <span class="nc">LogSigmoid</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/LogSigmoid.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.LogSigmoid()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">logsigmoid</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span></div>


<!-- Generated viewcode markup (Pygments-highlighted Python source) for torch.nn.Softplus.
     NOTE(review): auto-generated by Sphinx; content fixes belong in the upstream .py docstring. -->
<div class="viewcode-block" id="Softplus"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Softplus">[docs]</a><span class="k">class</span> <span class="nc">Softplus</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))</span>

<span class="sd">    SoftPlus is a smooth approximation to the ReLU function and can be used</span>
<span class="sd">    to constrain the output of a machine to always be positive.</span>

<span class="sd">    For numerical stability the implementation reverts to the linear function</span>
<span class="sd">    when :math:`input \times \beta &gt; threshold`.</span>

<span class="sd">    Args:</span>
<span class="sd">        beta: the :math:`\beta` value for the Softplus formulation. Default: 1</span>
<span class="sd">        threshold: values above this revert to a linear function. Default: 20</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Softplus.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Softplus()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;beta&#39;</span><span class="p">,</span> <span class="s1">&#39;threshold&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">beta</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">threshold</span><span class="o">=</span><span class="mi">20</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Softplus</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">beta</span> <span class="o">=</span> <span class="n">beta</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">threshold</span> <span class="o">=</span> <span class="n">threshold</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">softplus</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">threshold</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="s1">&#39;beta=</span><span class="si">{}</span><span class="s1">, threshold=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">threshold</span><span class="p">)</span></div>


<!-- Generated viewcode markup (Pygments-highlighted Python source) for torch.nn.Softshrink.
     NOTE(review): auto-generated by Sphinx; content fixes belong in the upstream .py docstring. -->
<div class="viewcode-block" id="Softshrink"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Softshrink">[docs]</a><span class="k">class</span> <span class="nc">Softshrink</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the soft shrinkage function elementwise:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{SoftShrinkage}(x) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">        x - \lambda, &amp; \text{ if } x &gt; \lambda \\</span>
<span class="sd">        x + \lambda, &amp; \text{ if } x &lt; -\lambda \\</span>
<span class="sd">        0, &amp; \text{ otherwise }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means, any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Softshrink.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Softshrink()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;lambd&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lambd</span><span class="o">=</span><span class="mf">0.5</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Softshrink</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">lambd</span> <span class="o">=</span> <span class="n">lambd</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">softshrink</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">lambd</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="nb">str</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">lambd</span><span class="p">)</span></div>


<div class="viewcode-block" id="MultiheadAttention"><a class="viewcode-back" href="../../../../nn.html#torch.nn.MultiheadAttention">[docs]</a><span class="k">class</span> <span class="nc">MultiheadAttention</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Allows the model to jointly attend to information</span>
<span class="sd">    from different representation subspaces.</span>
<span class="sd">    See reference: Attention Is All You Need</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O</span>
<span class="sd">        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)</span>

<span class="sd">    Args:</span>
<span class="sd">        embed_dim: total dimension of the model.</span>
<span class="sd">        num_heads: parallel attention heads.</span>
<span class="sd">        dropout: a Dropout layer on attn_output_weights. Default: 0.0.</span>
<span class="sd">        bias: add bias as module parameter. Default: True.</span>
<span class="sd">        add_bias_kv: add bias to the key and value sequences at dim=0.</span>
<span class="sd">        add_zero_attn: add a new batch of zeros to the key and</span>
<span class="sd">                       value sequences at dim=1.</span>
<span class="sd">        kdim: total number of features in key. Default: None.</span>
<span class="sd">        vdim: total number of features in value. Default: None.</span>

<span class="sd">        Note: if kdim and vdim are None, they will be set to embed_dim such that</span>
<span class="sd">        query, key, and value have the same number of features.</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)</span>
<span class="sd">        &gt;&gt;&gt; attn_output, attn_output_weights = multihead_attn(query, key, value)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="vm">__annotations__</span> <span class="o">=</span> <span class="p">{</span>
        <span class="s1">&#39;bias_k&#39;</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">_jit_internal</span><span class="o">.</span><span class="n">Optional</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
        <span class="s1">&#39;bias_v&#39;</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">_jit_internal</span><span class="o">.</span><span class="n">Optional</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
    <span class="p">}</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;q_proj_weight&#39;</span><span class="p">,</span> <span class="s1">&#39;k_proj_weight&#39;</span><span class="p">,</span> <span class="s1">&#39;v_proj_weight&#39;</span><span class="p">,</span> <span class="s1">&#39;in_proj_weight&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">embed_dim</span><span class="p">,</span> <span class="n">num_heads</span><span class="p">,</span> <span class="n">dropout</span><span class="o">=</span><span class="mf">0.</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">add_bias_kv</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">add_zero_attn</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">kdim</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">vdim</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MultiheadAttention</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">embed_dim</span> <span class="o">=</span> <span class="n">embed_dim</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kdim</span> <span class="o">=</span> <span class="n">kdim</span> <span class="k">if</span> <span class="n">kdim</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="n">embed_dim</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">vdim</span> <span class="o">=</span> <span class="n">vdim</span> <span class="k">if</span> <span class="n">vdim</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="n">embed_dim</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_qkv_same_embed_dim</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">kdim</span> <span class="o">==</span> <span class="n">embed_dim</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">vdim</span> <span class="o">==</span> <span class="n">embed_dim</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">num_heads</span> <span class="o">=</span> <span class="n">num_heads</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dropout</span> <span class="o">=</span> <span class="n">dropout</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">head_dim</span> <span class="o">=</span> <span class="n">embed_dim</span> <span class="o">//</span> <span class="n">num_heads</span>
        <span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">head_dim</span> <span class="o">*</span> <span class="n">num_heads</span> <span class="o">==</span> <span class="bp">self</span><span class="o">.</span><span class="n">embed_dim</span><span class="p">,</span> <span class="s2">&quot;embed_dim must be divisible by num_heads&quot;</span>

        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_qkv_same_embed_dim</span> <span class="ow">is</span> <span class="kc">False</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">q_proj_weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">embed_dim</span><span class="p">,</span> <span class="n">embed_dim</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">k_proj_weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">embed_dim</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">kdim</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">v_proj_weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">embed_dim</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">vdim</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s1">&#39;in_proj_weight&#39;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">(</span><span class="mi">3</span> <span class="o">*</span> <span class="n">embed_dim</span><span class="p">,</span> <span class="n">embed_dim</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s1">&#39;q_proj_weight&#39;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s1">&#39;k_proj_weight&#39;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s1">&#39;v_proj_weight&#39;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">bias</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_bias</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">(</span><span class="mi">3</span> <span class="o">*</span> <span class="n">embed_dim</span><span class="p">))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">register_parameter</span><span class="p">(</span><span class="s1">&#39;in_proj_bias&#39;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_proj</span> <span class="o">=</span> <span class="n">Linear</span><span class="p">(</span><span class="n">embed_dim</span><span class="p">,</span> <span class="n">embed_dim</span><span class="p">,</span> <span class="n">bias</span><span class="o">=</span><span class="n">bias</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">add_bias_kv</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias_k</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">embed_dim</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias_v</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">embed_dim</span><span class="p">))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias_k</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_v</span> <span class="o">=</span> <span class="kc">None</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">add_zero_attn</span> <span class="o">=</span> <span class="n">add_zero_attn</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">_reset_parameters</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">_reset_parameters</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_qkv_same_embed_dim</span><span class="p">:</span>
            <span class="n">xavier_uniform_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">in_proj_weight</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">xavier_uniform_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">q_proj_weight</span><span class="p">)</span>
            <span class="n">xavier_uniform_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">k_proj_weight</span><span class="p">)</span>
            <span class="n">xavier_uniform_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">v_proj_weight</span><span class="p">)</span>

        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_bias</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">constant_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">in_proj_bias</span><span class="p">,</span> <span class="mf">0.</span><span class="p">)</span>
            <span class="n">constant_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">out_proj</span><span class="o">.</span><span class="n">bias</span><span class="p">,</span> <span class="mf">0.</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_k</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">xavier_normal_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">bias_k</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_v</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">xavier_normal_</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">bias_v</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__setstate__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
        <span class="c1"># Support loading old MultiheadAttention checkpoints generated by v1.1.0</span>
        <span class="k">if</span> <span class="s1">&#39;_qkv_same_embed_dim&#39;</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">state</span><span class="p">:</span>
            <span class="n">state</span><span class="p">[</span><span class="s1">&#39;_qkv_same_embed_dim&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>

        <span class="nb">super</span><span class="p">(</span><span class="n">MultiheadAttention</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">__setstate__</span><span class="p">(</span><span class="n">state</span><span class="p">)</span>

<div class="viewcode-block" id="MultiheadAttention.forward"><a class="viewcode-back" href="../../../../nn.html#torch.nn.MultiheadAttention.forward">[docs]</a>    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">query</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="n">key_padding_mask</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                <span class="n">need_weights</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">attn_mask</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="c1"># type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -&gt; Tuple[Tensor, Optional[Tensor]]</span>
        <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Args:</span>
<span class="sd">        query, key, value: map a query and a set of key-value pairs to an output.</span>
<span class="sd">            See &quot;Attention Is All You Need&quot; for more details.</span>
<span class="sd">        key_padding_mask: if provided, specified padding elements in the key will</span>
<span class="sd">            be ignored by the attention. This is a binary mask. When the value is True,</span>
<span class="sd">            the corresponding value on the attention layer will be filled with -inf.</span>
<span class="sd">        need_weights: output attn_output_weights.</span>
<span class="sd">        attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask</span>
<span class="sd">            (i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all</span>
<span class="sd">            the batches while a 3D mask allows to specify a different mask for the entries of each batch.</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Inputs:</span>
<span class="sd">        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is</span>
<span class="sd">          the embedding dimension.</span>
<span class="sd">        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is</span>
<span class="sd">          the embedding dimension.</span>
<span class="sd">        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is</span>
<span class="sd">          the embedding dimension.</span>
<span class="sd">        - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.</span>
<span class="sd">        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.</span>
<span class="sd">          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,</span>
<span class="sd">          S is the source sequence length.</span>

<span class="sd">        - Outputs:</span>
<span class="sd">        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,</span>
<span class="sd">          E is the embedding dimension.</span>
<span class="sd">        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,</span>
<span class="sd">          L is the target sequence length, S is the source sequence length.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_qkv_same_embed_dim</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">multi_head_attention_forward</span><span class="p">(</span>
                <span class="n">query</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">embed_dim</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_heads</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_weight</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_bias</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">bias_k</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_v</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">add_zero_attn</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">dropout</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_proj</span><span class="o">.</span><span class="n">weight</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_proj</span><span class="o">.</span><span class="n">bias</span><span class="p">,</span>
                <span class="n">training</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">,</span>
                <span class="n">key_padding_mask</span><span class="o">=</span><span class="n">key_padding_mask</span><span class="p">,</span> <span class="n">need_weights</span><span class="o">=</span><span class="n">need_weights</span><span class="p">,</span>
                <span class="n">attn_mask</span><span class="o">=</span><span class="n">attn_mask</span><span class="p">,</span> <span class="n">use_separate_proj_weight</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                <span class="n">q_proj_weight</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">q_proj_weight</span><span class="p">,</span> <span class="n">k_proj_weight</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">k_proj_weight</span><span class="p">,</span>
                <span class="n">v_proj_weight</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">v_proj_weight</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">multi_head_attention_forward</span><span class="p">(</span>
                <span class="n">query</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">embed_dim</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_heads</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_weight</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">in_proj_bias</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">bias_k</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_v</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">add_zero_attn</span><span class="p">,</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">dropout</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_proj</span><span class="o">.</span><span class="n">weight</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_proj</span><span class="o">.</span><span class="n">bias</span><span class="p">,</span>
                <span class="n">training</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">,</span>
                <span class="n">key_padding_mask</span><span class="o">=</span><span class="n">key_padding_mask</span><span class="p">,</span> <span class="n">need_weights</span><span class="o">=</span><span class="n">need_weights</span><span class="p">,</span>
                <span class="n">attn_mask</span><span class="o">=</span><span class="n">attn_mask</span><span class="p">)</span></div></div>


<div class="viewcode-block" id="PReLU"><a class="viewcode-back" href="../../../../nn.html#torch.nn.PReLU">[docs]</a><span class="k">class</span> <span class="nc">PReLU</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{PReLU}(x) = \max(0,x) + a * \min(0,x)</span>

<span class="sd">    or</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{PReLU}(x) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">        x, &amp; \text{ if } x \geq 0 \\</span>
<span class="sd">        ax, &amp; \text{ otherwise }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single</span>
<span class="sd">    parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,</span>
<span class="sd">    a separate :math:`a` is used for each input channel.</span>


<span class="sd">    .. note::</span>
<span class="sd">        weight decay should not be used when learning :math:`a` for good performance.</span>

<span class="sd">    .. note::</span>
<span class="sd">        Channel dim is the 2nd dim of input. When input has dims &lt; 2, then there is</span>
<span class="sd">        no channel dim and the number of channels = 1.</span>

<span class="sd">    Args:</span>
<span class="sd">        num_parameters (int): number of :math:`a` to learn.</span>
<span class="sd">            Although it takes an int as input, there are only two legitimate values:</span>
<span class="sd">            1, or the number of channels at input. Default: 1</span>
<span class="sd">        init (float): the initial value of :math:`a`. Default: 0.25</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    Attributes:</span>
<span class="sd">        weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).</span>

<span class="sd">    .. image:: scripts/activation_images/PReLU.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.PReLU()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;num_parameters&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">num_parameters</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">init</span><span class="o">=</span><span class="mf">0.25</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">num_parameters</span> <span class="o">=</span> <span class="n">num_parameters</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">PReLU</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">num_parameters</span><span class="p">)</span><span class="o">.</span><span class="n">fill_</span><span class="p">(</span><span class="n">init</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">prelu</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="s1">&#39;num_parameters=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">num_parameters</span><span class="p">)</span></div>


<div class="viewcode-block" id="Softsign"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Softsign">[docs]</a><span class="k">class</span> <span class="nc">Softsign</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{SoftSign}(x) = \frac{x}{ 1 + |x|}</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Softsign.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Softsign()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">softsign</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span></div>


<div class="viewcode-block" id="Tanhshrink"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Tanhshrink">[docs]</a><span class="k">class</span> <span class="nc">Tanhshrink</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the element-wise function:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Tanhshrink}(x) = x - \tanh(x)</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, *)` where `*` means any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(N, *)`, same shape as the input</span>

<span class="sd">    .. image:: scripts/activation_images/Tanhshrink.png</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Tanhshrink()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">tanhshrink</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span></div>


<div class="viewcode-block" id="Softmin"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Softmin">[docs]</a><span class="k">class</span> <span class="nc">Softmin</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the Softmin function to an n-dimensional input Tensor</span>
<span class="sd">    rescaling it so that the elements of the n-dimensional output Tensor</span>
<span class="sd">    lie in the range `[0, 1]` and sum to 1.</span>

<span class="sd">    Softmin is defined as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(*)` where `*` means any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(*)`, same shape as the input</span>

<span class="sd">    Arguments:</span>
<span class="sd">        dim (int): A dimension along which Softmin will be computed (so every slice</span>
<span class="sd">            along dim will sum to 1).</span>

<span class="sd">    Returns:</span>
<span class="sd">        a Tensor of the same dimension and shape as the input, with</span>
<span class="sd">        values in the range [0, 1]</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Softmin()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2, 3)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;dim&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Softmin</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dim</span> <span class="o">=</span> <span class="n">dim</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">softmin</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">,</span> <span class="n">_stacklevel</span><span class="o">=</span><span class="mi">5</span><span class="p">)</span></div>


<div class="viewcode-block" id="Softmax"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Softmax">[docs]</a><span class="k">class</span> <span class="nc">Softmax</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the Softmax function to an n-dimensional input Tensor</span>
<span class="sd">    rescaling it so that the elements of the n-dimensional output Tensor</span>
<span class="sd">    lie in the range [0,1] and sum to 1.</span>

<span class="sd">    Softmax is defined as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(*)` where `*` means any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(*)`, same shape as the input</span>

<span class="sd">    Returns:</span>
<span class="sd">        a Tensor of the same dimension and shape as the input with</span>
<span class="sd">        values in the range [0, 1]</span>

<span class="sd">    Arguments:</span>
<span class="sd">        dim (int): A dimension along which Softmax will be computed (so every slice</span>
<span class="sd">            along dim will sum to 1).</span>

<span class="sd">    .. note::</span>
<span class="sd">        This module doesn&#39;t work directly with NLLLoss,</span>
<span class="sd">        which expects log-probabilities as input rather than raw Softmax outputs.</span>
<span class="sd">        Use `LogSoftmax` instead (it&#39;s faster and has better numerical properties).</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Softmax(dim=1)</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2, 3)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;dim&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Softmax</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dim</span> <span class="o">=</span> <span class="n">dim</span>

    <span class="k">def</span> <span class="nf">__setstate__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="vm">__dict__</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">state</span><span class="p">)</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">hasattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="s1">&#39;dim&#39;</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">dim</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">softmax</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">,</span> <span class="n">_stacklevel</span><span class="o">=</span><span class="mi">5</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">extra_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="s1">&#39;dim=</span><span class="si">{dim}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">dim</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">)</span></div>


<div class="viewcode-block" id="Softmax2d"><a class="viewcode-back" href="../../../../nn.html#torch.nn.Softmax2d">[docs]</a><span class="k">class</span> <span class="nc">Softmax2d</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies SoftMax over features to each spatial location.</span>

<span class="sd">    When given an image of ``Channels x Height x Width``, it will</span>
<span class="sd">    apply `Softmax` to each location :math:`(Channels, h_i, w_j)`</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(N, C, H, W)`</span>
<span class="sd">        - Output: :math:`(N, C, H, W)` (same shape as input)</span>

<span class="sd">    Returns:</span>
<span class="sd">        a Tensor of the same dimension and shape as the input with</span>
<span class="sd">        values in the range [0, 1]</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.Softmax2d()</span>
<span class="sd">        &gt;&gt;&gt; # you softmax over the 2nd dimension</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2, 3, 12, 13)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">assert</span> <span class="nb">input</span><span class="o">.</span><span class="n">dim</span><span class="p">()</span> <span class="o">==</span> <span class="mi">4</span><span class="p">,</span> <span class="s1">&#39;Softmax2d requires a 4D tensor as input&#39;</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">softmax</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">_stacklevel</span><span class="o">=</span><span class="mi">5</span><span class="p">)</span></div>


<div class="viewcode-block" id="LogSoftmax"><a class="viewcode-back" href="../../../../nn.html#torch.nn.LogSoftmax">[docs]</a><span class="k">class</span> <span class="nc">LogSoftmax</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional</span>
<span class="sd">    input Tensor. The LogSoftmax formulation can be simplified as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)</span>

<span class="sd">    Shape:</span>
<span class="sd">        - Input: :math:`(*)` where `*` means any number of additional</span>
<span class="sd">          dimensions</span>
<span class="sd">        - Output: :math:`(*)`, same shape as the input</span>

<span class="sd">    Arguments:</span>
<span class="sd">        dim (int): A dimension along which LogSoftmax will be computed.</span>

<span class="sd">    Returns:</span>
<span class="sd">        a Tensor of the same dimension and shape as the input with</span>
<span class="sd">        values in the range [-inf, 0)</span>

<span class="sd">    Examples::</span>

<span class="sd">        &gt;&gt;&gt; m = nn.LogSoftmax()</span>
<span class="sd">        &gt;&gt;&gt; input = torch.randn(2, 3)</span>
<span class="sd">        &gt;&gt;&gt; output = m(input)</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__constants__</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;dim&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">LogSoftmax</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dim</span> <span class="o">=</span> <span class="n">dim</span>

    <span class="k">def</span> <span class="nf">__setstate__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="vm">__dict__</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">state</span><span class="p">)</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">hasattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="s1">&#39;dim&#39;</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">dim</span> <span class="o">=</span> <span class="kc">None</span>

    <span class="k">def</span> <span class="nf">forward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">F</span><span class="o">.</span><span class="n">log_softmax</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">,</span> <span class="n">_stacklevel</span><span class="o">=</span><span class="mi">5</span><span class="p">)</span></div>
</pre></div>

             </article>
             
            </div>
            <footer>
  

  

    <hr>

  

  <div role="contentinfo">
    <p>
        &copy; Copyright 2019, Torch Contributors.

    </p>
  </div>
    
      <div>
        Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>
     

</footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              
            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <script type="text/javascript" id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
         <script src="../../../../_static/jquery.js"></script>
         <script src="../../../../_static/underscore.js"></script>
         <script src="../../../../_static/doctools.js"></script>
         <script src="../../../../_static/language_data.js"></script>
     

  

  <script type="text/javascript" src="../../../../_static/js/vendor/popper.min.js"></script>
  <script type="text/javascript" src="../../../../_static/js/vendor/bootstrap.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script type="text/javascript" src="../../../../_static/js/theme.js"></script>

  <script type="text/javascript">
      // Initialize the Read the Docs theme's sidebar navigation once the DOM
      // is ready (jQuery ready handler).
      // NOTE(review): the `true` argument presumably enables sticky/scroll
      // navigation — confirm against _static/js/theme.js.
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 
<script>
  // Standard Google Analytics (analytics.js) bootstrap snippet: installs a
  // stub `ga` function that queues calls in `ga.q`, records the load time in
  // `ga.l`, then asynchronously injects the analytics.js <script> element
  // before the first existing script tag on the page.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Create a tracker for property UA-90545585-1 and send one pageview hit.
  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<script>
  // Google gtag.js setup (the async loader script is included just above).
  // Ensure the shared dataLayer queue exists before pushing to it.
  window.dataLayer = window.dataLayer || [];

  // gtag() simply queues its arguments on the dataLayer for gtag.js to drain.
  function gtag(){dataLayer.push(arguments);}

  // Record the current timestamp and configure property UA-117752657-2
  // (sends the initial pageview by default).
  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <!-- Icon-only link: aria-label supplies the accessible name -->
        <a href="https://pytorch.org/" class="footer-logo" aria-label="PyTorch"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <!-- rel="noopener noreferrer" severs window.opener on new-tab links -->
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <div id="mc_embed_signup">
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&amp;id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>

                    <!-- Honeypot: real people should not fill this in and expect good things - do not remove this or risk form bot signups -->
                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <!-- Icon-only submit (empty value): aria-label supplies the accessible name -->
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button" aria-label="Subscribe">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <!-- Icon-only social links: label each for assistive technology -->
          <div class="footer-social-icons">
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <!-- Dismiss control; presumably a JS click handler targets .close-button — alt gives it an accessible name -->
    <img class="close-button" src="../../../../_static/images/pytorch-x.svg" alt="Close cookie notice">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <!-- Icon-only close control; aria-label supplies the accessible name.
               NOTE(review): href="#" with a JS behavior hook — a <button> would be more
               semantic, but theme JS/CSS target this anchor, so only the label is added. -->
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu" aria-label="Close menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <script src="../../../../_static/js/vendor/anchor.min.js"></script>

  <script>
    // Wire up theme navigation behaviors once the DOM is ready.
    // Relies on jQuery ($) and theme globals (mobileMenu, sideMenus, …) loaded earlier in the page.
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Remove placeholder elements Sphinx emits with data-tags='null'
      // (presumably empty <p> tags — selector matches any element with that attribute)
      $("[data-tags='null']").remove();

      // Mark links containing code spans, since we cannot create links inside code blocks
      $("article.pytorch-article a span.pre").each(function() {
        $(this).closest("a").addClass("has-code");
      });
    });
  </script>
</body>
</html>