

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>mindspore.nn — MindSpore master documentation</title>

  <!-- Theme and syntax-highlighting styles (type attribute is redundant on stylesheets). -->
  <link rel="stylesheet" href="../_static/css/theme.css">
  <link rel="stylesheet" href="../_static/pygments.css">

  <!-- HTML5 element support for legacy IE (< 9). -->
  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->

  <!-- Sphinx runtime configuration and helpers; load order matters:
       documentation_options and jquery must precede doctools/theme. -->
  <script id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
  <script src="../_static/jquery.js"></script>
  <script src="../_static/underscore.js"></script>
  <script src="../_static/doctools.js"></script>
  <script src="../_static/language_data.js"></script>
  <!-- MathJax is independent of the DOM and other scripts, hence async (bare boolean attribute). -->
  <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  <script src="../_static/js/theme.js"></script>

  <!-- Relational links used by the theme's index/search and prev/next navigation. -->
  <link rel="index" title="Index" href="../genindex.html">
  <link rel="search" title="Search" href="../search.html">
  <link rel="next" title="mindspore.nn.Cell" href="nn/mindspore.nn.Cell.html">
  <link rel="prev" title="mindspore.mindrecord" href="mindspore.mindrecord.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          <!-- Project home link; the theme styles it as the sidebar banner. -->
          <a href="../index.html" class="icon icon-home"> MindSpore</a>

          <!-- Quick-search box submitting to Sphinx's search page. The input has no
               visible label in this theme, so aria-label supplies its accessible name. -->
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
    <input type="hidden" name="check_keywords" value="yes">
    <input type="hidden" name="area" value="default">
  </form>
</div>
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <!-- Sphinx-generated toctree navigation. The "current" classes mark this page
               (mindspore.nn) and its expanded sub-sections; do not hand-edit entries —
               they are regenerated from the .rst sources on every build. -->
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">mindspore.nn</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#id1">基本构成单元</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Cell.html">mindspore.nn.Cell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GraphCell.html">mindspore.nn.GraphCell</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id2">容器</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.CellList.html">mindspore.nn.CellList</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SequentialCell.html">mindspore.nn.SequentialCell</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id3">卷积层</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv1d.html">mindspore.nn.Conv1d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv1dTranspose.html">mindspore.nn.Conv1dTranspose</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2d.html">mindspore.nn.Conv2d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2dTranspose.html">mindspore.nn.Conv2dTranspose</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv3d.html">mindspore.nn.Conv3d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv3dTranspose.html">mindspore.nn.Conv3dTranspose</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id4">梯度</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Jvp.html">mindspore.nn.Jvp</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Vjp.html">mindspore.nn.Vjp</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id5">循环层</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GRUCell.html">mindspore.nn.GRUCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GRU.html">mindspore.nn.GRU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LSTMCell.html">mindspore.nn.LSTMCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LSTM.html">mindspore.nn.LSTM</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.RNNCell.html">mindspore.nn.RNNCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.RNN.html">mindspore.nn.RNN</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id6">稀疏层</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Embedding.html">mindspore.nn.Embedding</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.EmbeddingLookup.html">mindspore.nn.EmbeddingLookup</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MultiFieldEmbeddingLookup.html">mindspore.nn.MultiFieldEmbeddingLookup</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SparseToDense.html">mindspore.nn.SparseToDense</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SparseTensorDenseMatmul.html">mindspore.nn.SparseTensorDenseMatmul</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id7">非线性激活函数</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.CELU.html">mindspore.nn.CELU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ELU.html">mindspore.nn.ELU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.FastGelu.html">mindspore.nn.FastGelu</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GELU.html">mindspore.nn.GELU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.get_activation.html">mindspore.nn.get_activation</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.HShrink.html">mindspore.nn.HShrink</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.HSigmoid.html">mindspore.nn.HSigmoid</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.HSwish.html">mindspore.nn.HSwish</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LeakyReLU.html">mindspore.nn.LeakyReLU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LogSigmoid.html">mindspore.nn.LogSigmoid</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LogSoftmax.html">mindspore.nn.LogSoftmax</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.PReLU.html">mindspore.nn.PReLU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ReLU.html">mindspore.nn.ReLU</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ReLU6.html">mindspore.nn.ReLU6</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Sigmoid.html">mindspore.nn.Sigmoid</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Softmax.html">mindspore.nn.Softmax</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SoftShrink.html">mindspore.nn.SoftShrink</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Tanh.html">mindspore.nn.Tanh</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id8">工具</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ClipByNorm.html">mindspore.nn.ClipByNorm</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Dense.html">mindspore.nn.Dense</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Dropout.html">mindspore.nn.Dropout</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Flatten.html">mindspore.nn.Flatten</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.L1Regularizer.html">mindspore.nn.L1Regularizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Norm.html">mindspore.nn.Norm</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.OneHot.html">mindspore.nn.OneHot</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Pad.html">mindspore.nn.Pad</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Range.html">mindspore.nn.Range</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ResizeBilinear.html">mindspore.nn.ResizeBilinear</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Roll.html">mindspore.nn.Roll</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Tril.html">mindspore.nn.Tril</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Triu.html">mindspore.nn.Triu</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Unfold.html">mindspore.nn.Unfold</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id9">图像</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.CentralCrop.html">mindspore.nn.CentralCrop</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ImageGradients.html">mindspore.nn.ImageGradients</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MSSSIM.html">mindspore.nn.MSSSIM</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.PSNR.html">mindspore.nn.PSNR</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SSIM.html">mindspore.nn.SSIM</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id10">归一化层</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.BatchNorm1d.html">mindspore.nn.BatchNorm1d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.BatchNorm2d.html">mindspore.nn.BatchNorm2d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.BatchNorm3d.html">mindspore.nn.BatchNorm3d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GlobalBatchNorm.html">mindspore.nn.GlobalBatchNorm</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GroupNorm.html">mindspore.nn.GroupNorm</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.InstanceNorm2d.html">mindspore.nn.InstanceNorm2d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LayerNorm.html">mindspore.nn.LayerNorm</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MatrixDiag.html">mindspore.nn.MatrixDiag</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MatrixDiagPart.html">mindspore.nn.MatrixDiagPart</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MatrixSetDiag.html">mindspore.nn.MatrixSetDiag</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SyncBatchNorm.html">mindspore.nn.SyncBatchNorm</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id11">池化层</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.AvgPool1d.html">mindspore.nn.AvgPool1d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.AvgPool2d.html">mindspore.nn.AvgPool2d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MaxPool1d.html">mindspore.nn.MaxPool1d</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MaxPool2d.html">mindspore.nn.MaxPool2d</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id12">量化</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ActQuant.html">mindspore.nn.ActQuant</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2dBnAct.html">mindspore.nn.Conv2dBnAct</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2dBnFoldQuant.html">mindspore.nn.Conv2dBnFoldQuant</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2dBnFoldQuantOneConv.html">mindspore.nn.Conv2dBnFoldQuantOneConv</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2dBnWithoutFoldQuant.html">mindspore.nn.Conv2dBnWithoutFoldQuant</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Conv2dQuant.html">mindspore.nn.Conv2dQuant</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.DenseBnAct.html">mindspore.nn.DenseBnAct</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.DenseQuant.html">mindspore.nn.DenseQuant</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.FakeQuantWithMinMaxObserver.html">mindspore.nn.FakeQuantWithMinMaxObserver</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MulQuant.html">mindspore.nn.MulQuant</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.TensorAddQuant.html">mindspore.nn.TensorAddQuant</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id13">损失函数</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.BCELoss.html">mindspore.nn.BCELoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.BCEWithLogitsLoss.html">mindspore.nn.BCEWithLogitsLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.CosineEmbeddingLoss.html">mindspore.nn.CosineEmbeddingLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.DiceLoss.html">mindspore.nn.DiceLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.FocalLoss.html">mindspore.nn.FocalLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.L1Loss.html">mindspore.nn.L1Loss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LossBase.html">mindspore.nn.LossBase</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MSELoss.html">mindspore.nn.MSELoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MultiClassDiceLoss.html">mindspore.nn.MultiClassDiceLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.RMSELoss.html">mindspore.nn.RMSELoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SampledSoftmaxLoss.html">mindspore.nn.SampledSoftmaxLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SmoothL1Loss.html">mindspore.nn.SmoothL1Loss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SoftMarginLoss.html">mindspore.nn.SoftMarginLoss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SoftmaxCrossEntropyWithLogits.html">mindspore.nn.SoftmaxCrossEntropyWithLogits</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id14">优化器</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Adagrad.html">mindspore.nn.Adagrad</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Adam.html">mindspore.nn.Adam</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.AdamOffload.html">mindspore.nn.AdamOffload</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.AdamWeightDecay.html">mindspore.nn.AdamWeightDecay</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ASGD.html">mindspore.nn.ASGD</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.FTRL.html">mindspore.nn.FTRL</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Lamb.html">mindspore.nn.Lamb</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LARS.html">mindspore.nn.LARS</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.LazyAdam.html">mindspore.nn.LazyAdam</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Momentum.html">mindspore.nn.Momentum</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Optimizer.html">mindspore.nn.Optimizer</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ProximalAdagrad.html">mindspore.nn.ProximalAdagrad</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.RMSProp.html">mindspore.nn.RMSProp</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Rprop.html">mindspore.nn.Rprop</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.SGD.html">mindspore.nn.SGD</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.thor.html">mindspore.nn.thor</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#wrapper">Wrapper</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.DistributedGradReducer.html">mindspore.nn.DistributedGradReducer</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.DynamicLossScaleUpdateCell.html">mindspore.nn.DynamicLossScaleUpdateCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.FixedLossScaleUpdateCell.html">mindspore.nn.FixedLossScaleUpdateCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ForwardValueAndGrad.html">mindspore.nn.ForwardValueAndGrad</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.GetNextSingleOp.html">mindspore.nn.GetNextSingleOp</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MicroBatchInterleaved.html">mindspore.nn.MicroBatchInterleaved</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ParameterUpdate.html">mindspore.nn.ParameterUpdate</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.PipelineCell.html">mindspore.nn.PipelineCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.TimeDistributed.html">mindspore.nn.TimeDistributed</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.TrainOneStepCell.html">mindspore.nn.TrainOneStepCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.TrainOneStepWithLossScaleCell.html">mindspore.nn.TrainOneStepWithLossScaleCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.WithEvalCell.html">mindspore.nn.WithEvalCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.WithGradCell.html">mindspore.nn.WithGradCell</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.WithLossCell.html">mindspore.nn.WithLossCell</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id15">数学运算</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MatMul.html">mindspore.nn.MatMul</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Moments.html">mindspore.nn.Moments</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ReduceLogSumExp.html">mindspore.nn.ReduceLogSumExp</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id16">评估指标</a><ul>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Accuracy.html">mindspore.nn.Accuracy</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.auc.html">mindspore.nn.auc</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.BleuScore.html">mindspore.nn.BleuScore</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ConfusionMatrix.html">mindspore.nn.ConfusionMatrix</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ConfusionMatrixMetric.html">mindspore.nn.ConfusionMatrixMetric</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.CosineSimilarity.html">mindspore.nn.CosineSimilarity</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Dice.html">mindspore.nn.Dice</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.F1.html">mindspore.nn.F1</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Fbeta.html">mindspore.nn.Fbeta</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.HausdorffDistance.html">mindspore.nn.HausdorffDistance</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.get_metric_fn.html">mindspore.nn.get_metric_fn</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Loss.html">mindspore.nn.Loss</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MAE.html">mindspore.nn.MAE</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MeanSurfaceDistance.html">mindspore.nn.MeanSurfaceDistance</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Metric.html">mindspore.nn.Metric</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.MSE.html">mindspore.nn.MSE</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.names.html">mindspore.nn.names</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.OcclusionSensitivity.html">mindspore.nn.OcclusionSensitivity</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Perplexity.html">mindspore.nn.Perplexity</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Precision.html">mindspore.nn.Precision</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Recall.html">mindspore.nn.Recall</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.ROC.html">mindspore.nn.ROC</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.RootMeanSquareDistance.html">mindspore.nn.RootMeanSquareDistance</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.rearrange_inputs.html">mindspore.nn.rearrange_inputs</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Top1CategoricalAccuracy.html">mindspore.nn.Top1CategoricalAccuracy</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.Top5CategoricalAccuracy.html">mindspore.nn.Top5CategoricalAccuracy</a></li>
<li class="toctree-l3"><a class="reference internal" href="nn/mindspore.nn.TopKCategoricalAccuracy.html">mindspore.nn.TopKCategoricalAccuracy</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#id17">动态学习率</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#learningrateschedule">LearningRateSchedule类</a><ul>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.CosineDecayLR.html">mindspore.nn.CosineDecayLR</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.ExponentialDecayLR.html">mindspore.nn.ExponentialDecayLR</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.InverseDecayLR.html">mindspore.nn.InverseDecayLR</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.NaturalExpDecayLR.html">mindspore.nn.NaturalExpDecayLR</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.PolynomialDecayLR.html">mindspore.nn.PolynomialDecayLR</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.WarmUpLR.html">mindspore.nn.WarmUpLR</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="#dynamic-lr">Dynamic LR函数</a><ul>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.cosine_decay_lr.html">mindspore.nn.cosine_decay_lr</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.exponential_decay_lr.html">mindspore.nn.exponential_decay_lr</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.inverse_decay_lr.html">mindspore.nn.inverse_decay_lr</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.natural_exp_decay_lr.html">mindspore.nn.natural_exp_decay_lr</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.piecewise_constant_lr.html">mindspore.nn.piecewise_constant_lr</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.polynomial_decay_lr.html">mindspore.nn.polynomial_decay_lr</a></li>
<li class="toctree-l4"><a class="reference internal" href="nn/mindspore.nn.warmup_lr.html">mindspore.nn.warmup_lr</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.boost.html">mindspore.boost</a></li>
</ul>
<!-- Cross-link to the separately hosted C++ (Lite) API documentation. -->
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        <!-- NOTE(review): the theme's JS appears to bind the sidebar toggle to this <i>
             via data-toggle; an <i> is not keyboard-focusable, so the toggle seems
             mouse-only. A <button> would be more accessible, but replacing it requires
             matching changes in the theme's CSS/JS — confirm before altering. -->
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    <!-- The home link is icon-only (empty element); aria-label gives it an accessible
         name so it is not announced as an empty link by screen readers. -->
    <li><a href="../index.html" class="icon icon-home" aria-label="Home"></a> &raquo;</li>
    <li>mindspore.nn</li>
    <li class="wy-breadcrumbs-aside">
      <a href="../_sources/api_python/mindspore.nn.rst.txt" rel="nofollow"> View page source</a>
    </li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="mindspore-nn">
<h1>mindspore.nn<a class="headerlink" href="#mindspore-nn" title="Permalink to this headline">¶</a></h1>
<p>神经网络Cell。</p>
<p>用于构建神经网络中的预定义构建块或计算单元。</p>
<p>MindSpore中 <cite>mindspore.nn</cite> 接口与上一版本相比，新增、删除和支持平台的变化信息请参考 <a class="reference external" href="https://gitee.com/mindspore/docs/blob/master/resource/api_updates/nn_api_updates.md">API Updates</a>。</p>
<div class="section" id="id1">
<h2>基本构成单元<a class="headerlink" href="#id1" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%">
<col style="width: 60%">
<col style="width: 30%">
</colgroup>
<!-- Header row moved into <thead> with <th scope="col"> so assistive technology
     can associate each data cell with its column header. -->
<thead>
<tr class="row-odd"><th scope="col"><p>接口名</p></th>
<th scope="col"><p>概述</p></th>
<th scope="col"><p>支持平台</p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Cell.html#mindspore.nn.Cell" title="mindspore.nn.Cell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Cell</span></code></a></p></td>
<td><p>MindSpore中神经网络的基本构成单元。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.GraphCell.html#mindspore.nn.GraphCell" title="mindspore.nn.GraphCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GraphCell</span></code></a></p></td>
<td><p>运行从MindIR加载的计算图。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id2">
<h2>容器<a class="headerlink" href="#id2" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.CellList.html#mindspore.nn.CellList" title="mindspore.nn.CellList"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.CellList</span></code></a></p></td>
<td><p>构造Cell列表。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.SequentialCell.html#mindspore.nn.SequentialCell" title="mindspore.nn.SequentialCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SequentialCell</span></code></a></p></td>
<td><p>构造Cell顺序容器。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id3">
<h2>卷积层<a class="headerlink" href="#id3" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv1d.html#mindspore.nn.Conv1d" title="mindspore.nn.Conv1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv1d</span></code></a></p></td>
<td><p>一维卷积层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv1dTranspose.html#mindspore.nn.Conv1dTranspose" title="mindspore.nn.Conv1dTranspose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv1dTranspose</span></code></a></p></td>
<td><p>一维转置卷积层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2d.html#mindspore.nn.Conv2d" title="mindspore.nn.Conv2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2d</span></code></a></p></td>
<td><p>二维卷积层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2dTranspose.html#mindspore.nn.Conv2dTranspose" title="mindspore.nn.Conv2dTranspose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2dTranspose</span></code></a></p></td>
<td><p>二维转置卷积层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv3d.html#mindspore.nn.Conv3d" title="mindspore.nn.Conv3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv3d</span></code></a></p></td>
<td><p>三维卷积层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv3dTranspose.html#mindspore.nn.Conv3dTranspose" title="mindspore.nn.Conv3dTranspose"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv3dTranspose</span></code></a></p></td>
<td><p>三维转置卷积层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id4">
<h2>梯度<a class="headerlink" href="#id4" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Jvp.html#mindspore.nn.Jvp" title="mindspore.nn.Jvp"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Jvp</span></code></a></p></td>
<td><p>计算给定网络的雅可比向量积(Jacobian-vector product, JVP)。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Vjp.html#mindspore.nn.Vjp" title="mindspore.nn.Vjp"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Vjp</span></code></a></p></td>
<td><p>计算给定网络的向量雅可比积(vector-Jacobian product, VJP)。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id5">
<h2>循环层<a class="headerlink" href="#id5" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.GRUCell.html#mindspore.nn.GRUCell" title="mindspore.nn.GRUCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GRUCell</span></code></a></p></td>
<td><p>GRU（Gated Recurrent Unit）称为门控循环单元。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.GRU.html#mindspore.nn.GRU" title="mindspore.nn.GRU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GRU</span></code></a></p></td>
<td><p>GRU（Gated Recurrent Unit）称为门控循环单元网络，是循环神经网络（Recurrent Neural Network, RNN）的一种。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.LSTMCell.html#mindspore.nn.LSTMCell" title="mindspore.nn.LSTMCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LSTMCell</span></code></a></p></td>
<td><p>长短期记忆网络单元（LSTMCell）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.LSTM.html#mindspore.nn.LSTM" title="mindspore.nn.LSTM"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LSTM</span></code></a></p></td>
<td><p>长短期记忆（LSTM）网络，根据输入序列和给定的初始状态计算返回输出序列和最终状态。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.RNNCell.html#mindspore.nn.RNNCell" title="mindspore.nn.RNNCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.RNNCell</span></code></a></p></td>
<td><p>用于定义循环神经网络（RNN）的一个单元，激活函数是tanh或relu。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.RNN.html#mindspore.nn.RNN" title="mindspore.nn.RNN"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.RNN</span></code></a></p></td>
<td><p>循环神经网络（RNN）层，其使用的激活函数为tanh或relu。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id6">
<h2>稀疏层<a class="headerlink" href="#id6" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Embedding.html#mindspore.nn.Embedding" title="mindspore.nn.Embedding"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Embedding</span></code></a></p></td>
<td><p>嵌入层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.EmbeddingLookup.html#mindspore.nn.EmbeddingLookup" title="mindspore.nn.EmbeddingLookup"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.EmbeddingLookup</span></code></a></p></td>
<td><p>嵌入查找层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MultiFieldEmbeddingLookup.html#mindspore.nn.MultiFieldEmbeddingLookup" title="mindspore.nn.MultiFieldEmbeddingLookup"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MultiFieldEmbeddingLookup</span></code></a></p></td>
<td><p>Returns a slice of input tensor based on the specified indices and the field ids.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.SparseToDense.html#mindspore.nn.SparseToDense" title="mindspore.nn.SparseToDense"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SparseToDense</span></code></a></p></td>
<td><p>Converts a sparse tensor into dense.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SparseTensorDenseMatmul.html#mindspore.nn.SparseTensorDenseMatmul" title="mindspore.nn.SparseTensorDenseMatmul"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SparseTensorDenseMatmul</span></code></a></p></td>
<td><p>Multiplies sparse matrix <cite>a</cite> and dense matrix <cite>b</cite>.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id7">
<h2>非线性激活函数<a class="headerlink" href="#id7" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.CELU.html#mindspore.nn.CELU" title="mindspore.nn.CELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.CELU</span></code></a></p></td>
<td><p>Continuously differentiable exponential linear units activation function.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ELU.html#mindspore.nn.ELU" title="mindspore.nn.ELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ELU</span></code></a></p></td>
<td><p>指数线性单元激活函数（Exponential Linear Unit activation function）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.FastGelu.html#mindspore.nn.FastGelu" title="mindspore.nn.FastGelu"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.FastGelu</span></code></a></p></td>
<td><p>快速高斯误差线性单元激活函数（Fast Gaussian Error Linear Units activation function）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.GELU.html#mindspore.nn.GELU" title="mindspore.nn.GELU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GELU</span></code></a></p></td>
<td><p>高斯误差线性单元激活函数（Gaussian error linear unit activation function）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.get_activation.html#mindspore.nn.get_activation" title="mindspore.nn.get_activation"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.get_activation</span></code></a></p></td>
<td><p>获取激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.HShrink.html#mindspore.nn.HShrink" title="mindspore.nn.HShrink"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.HShrink</span></code></a></p></td>
<td><p>Hard Shrink激活函数，按输入元素计算输出。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.HSigmoid.html#mindspore.nn.HSigmoid" title="mindspore.nn.HSigmoid"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.HSigmoid</span></code></a></p></td>
<td><p>Hard Sigmoid激活函数，按元素计算输出。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.HSwish.html#mindspore.nn.HSwish" title="mindspore.nn.HSwish"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.HSwish</span></code></a></p></td>
<td><p>Hard Swish激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.LeakyReLU.html#mindspore.nn.LeakyReLU" title="mindspore.nn.LeakyReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LeakyReLU</span></code></a></p></td>
<td><p>Leaky ReLU激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.LogSigmoid.html#mindspore.nn.LogSigmoid" title="mindspore.nn.LogSigmoid"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LogSigmoid</span></code></a></p></td>
<td><p>Log Sigmoid激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.LogSoftmax.html#mindspore.nn.LogSoftmax" title="mindspore.nn.LogSoftmax"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LogSoftmax</span></code></a></p></td>
<td><p>Log Softmax激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.PReLU.html#mindspore.nn.PReLU" title="mindspore.nn.PReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.PReLU</span></code></a></p></td>
<td><p>PReLU激活层（PReLU Activation Operator）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ReLU.html#mindspore.nn.ReLU" title="mindspore.nn.ReLU"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ReLU</span></code></a></p></td>
<td><p>修正线性单元激活函数（Rectified Linear Unit activation function）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ReLU6.html#mindspore.nn.ReLU6" title="mindspore.nn.ReLU6"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ReLU6</span></code></a></p></td>
<td><p>ReLU6激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Sigmoid.html#mindspore.nn.Sigmoid" title="mindspore.nn.Sigmoid"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Sigmoid</span></code></a></p></td>
<td><p>Sigmoid激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Softmax.html#mindspore.nn.Softmax" title="mindspore.nn.Softmax"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Softmax</span></code></a></p></td>
<td><p>Softmax函数，它是二分类函数 <a class="reference internal" href="nn/mindspore.nn.Sigmoid.html#mindspore.nn.Sigmoid" title="mindspore.nn.Sigmoid"><code class="xref py py-class docutils literal notranslate"><span class="pre">mindspore.nn.Sigmoid</span></code></a> 在多分类上的推广，目的是将多分类的结果以概率的形式展现出来。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SoftShrink.html#mindspore.nn.SoftShrink" title="mindspore.nn.SoftShrink"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SoftShrink</span></code></a></p></td>
<td><p>Applies the SoftShrink function element-wise.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Tanh.html#mindspore.nn.Tanh" title="mindspore.nn.Tanh"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Tanh</span></code></a></p></td>
<td><p>Tanh激活函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id8">
<h2>工具<a class="headerlink" href="#id8" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ClipByNorm.html#mindspore.nn.ClipByNorm" title="mindspore.nn.ClipByNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ClipByNorm</span></code></a></p></td>
<td><p>Clips tensor values to a maximum <span class="math notranslate nohighlight">\(L_2\)</span>-norm.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Dense.html#mindspore.nn.Dense" title="mindspore.nn.Dense"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Dense</span></code></a></p></td>
<td><p>全连接层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Dropout.html#mindspore.nn.Dropout" title="mindspore.nn.Dropout"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Dropout</span></code></a></p></td>
<td><p>随机丢弃层。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Flatten.html#mindspore.nn.Flatten" title="mindspore.nn.Flatten"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Flatten</span></code></a></p></td>
<td><p>对输入Tensor的第0维之外的维度进行展平操作。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.L1Regularizer.html#mindspore.nn.L1Regularizer" title="mindspore.nn.L1Regularizer"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.L1Regularizer</span></code></a></p></td>
<td><p>对权重计算L1正则化的值。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Norm.html#mindspore.nn.Norm" title="mindspore.nn.Norm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Norm</span></code></a></p></td>
<td><p>Computes the norm of vectors, currently including Euclidean norm, i.e., <span class="math notranslate nohighlight">\(L_2\)</span>-norm.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.OneHot.html#mindspore.nn.OneHot" title="mindspore.nn.OneHot"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.OneHot</span></code></a></p></td>
<td><p>返回一个one-hot类型的Tensor。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Pad.html#mindspore.nn.Pad" title="mindspore.nn.Pad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Pad</span></code></a></p></td>
<td><p>根据 <cite>paddings</cite> 和 <cite>mode</cite> 对输入进行填充。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Range.html#mindspore.nn.Range" title="mindspore.nn.Range"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Range</span></code></a></p></td>
<td><p>Creates a sequence of numbers in range [start, limit) with step size delta.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ResizeBilinear.html#mindspore.nn.ResizeBilinear" title="mindspore.nn.ResizeBilinear"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ResizeBilinear</span></code></a></p></td>
<td><p>使用双线性插值调整输入Tensor为指定的大小。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Roll.html#mindspore.nn.Roll" title="mindspore.nn.Roll"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Roll</span></code></a></p></td>
<td><p>Rolls the elements of a tensor along an axis.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Tril.html#mindspore.nn.Tril" title="mindspore.nn.Tril"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Tril</span></code></a></p></td>
<td><p>返回一个Tensor，指定主对角线以上的元素被置为零。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Triu.html#mindspore.nn.Triu" title="mindspore.nn.Triu"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Triu</span></code></a></p></td>
<td><p>返回一个Tensor，指定主对角线以下的元素被置为0。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Unfold.html#mindspore.nn.Unfold" title="mindspore.nn.Unfold"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Unfold</span></code></a></p></td>
<td><p>从图像中提取滑窗的区域块。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id9">
<h2>图像<a class="headerlink" href="#id9" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.CentralCrop.html#mindspore.nn.CentralCrop" title="mindspore.nn.CentralCrop"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.CentralCrop</span></code></a></p></td>
<td><p>Crops the central region of the images with the central_fraction.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ImageGradients.html#mindspore.nn.ImageGradients" title="mindspore.nn.ImageGradients"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ImageGradients</span></code></a></p></td>
<td><p>Returns two tensors, the first is along the height dimension and the second is along the width dimension.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MSSSIM.html#mindspore.nn.MSSSIM" title="mindspore.nn.MSSSIM"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MSSSIM</span></code></a></p></td>
<td><p>Returns MS-SSIM index between two images.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.PSNR.html#mindspore.nn.PSNR" title="mindspore.nn.PSNR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.PSNR</span></code></a></p></td>
<td><p>Returns Peak Signal-to-Noise Ratio of two image batches.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SSIM.html#mindspore.nn.SSIM" title="mindspore.nn.SSIM"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SSIM</span></code></a></p></td>
<td><p>Returns SSIM index between two images.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id10">
<h2>归一化层<a class="headerlink" href="#id10" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.BatchNorm1d.html#mindspore.nn.BatchNorm1d" title="mindspore.nn.BatchNorm1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.BatchNorm1d</span></code></a></p></td>
<td><p>对输入的二维数据进行批归一化层(Batch Normalization Layer)。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.BatchNorm2d.html#mindspore.nn.BatchNorm2d" title="mindspore.nn.BatchNorm2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.BatchNorm2d</span></code></a></p></td>
<td><p>对输入的四维数据进行批归一化层(Batch Normalization Layer)。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.BatchNorm3d.html#mindspore.nn.BatchNorm3d" title="mindspore.nn.BatchNorm3d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.BatchNorm3d</span></code></a></p></td>
<td><p>对输入的五维数据进行批归一化层(Batch Normalization Layer)。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.GlobalBatchNorm.html#mindspore.nn.GlobalBatchNorm" title="mindspore.nn.GlobalBatchNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GlobalBatchNorm</span></code></a></p></td>
<td><p>The GlobalBatchNorm interface is deprecated, please use the <a class="reference internal" href="nn/mindspore.nn.SyncBatchNorm.html#mindspore.nn.SyncBatchNorm" title="mindspore.nn.SyncBatchNorm"><code class="xref py py-class docutils literal notranslate"><span class="pre">mindspore.nn.SyncBatchNorm</span></code></a> instead.</p></td>
<td><p>deprecated</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.GroupNorm.html#mindspore.nn.GroupNorm" title="mindspore.nn.GroupNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GroupNorm</span></code></a></p></td>
<td><p>在mini-batch输入上进行组归一化。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.InstanceNorm2d.html#mindspore.nn.InstanceNorm2d" title="mindspore.nn.InstanceNorm2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.InstanceNorm2d</span></code></a></p></td>
<td><p>对四维输入实现实例归一化层（Instance Normalization Layer）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.LayerNorm.html#mindspore.nn.LayerNorm" title="mindspore.nn.LayerNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LayerNorm</span></code></a></p></td>
<td><p>在mini-batch输入上应用层归一化（Layer Normalization）。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MatrixDiag.html#mindspore.nn.MatrixDiag" title="mindspore.nn.MatrixDiag"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MatrixDiag</span></code></a></p></td>
<td><p>Returns a batched diagonal tensor with a given batched diagonal values.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MatrixDiagPart.html#mindspore.nn.MatrixDiagPart" title="mindspore.nn.MatrixDiagPart"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MatrixDiagPart</span></code></a></p></td>
<td><p>Returns the batched diagonal part of a batched tensor.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MatrixSetDiag.html#mindspore.nn.MatrixSetDiag" title="mindspore.nn.MatrixSetDiag"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MatrixSetDiag</span></code></a></p></td>
<td><p>Modifies the batched diagonal part of a batched tensor.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SyncBatchNorm.html#mindspore.nn.SyncBatchNorm" title="mindspore.nn.SyncBatchNorm"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SyncBatchNorm</span></code></a></p></td>
<td><p>Sync Batch Normalization layer over a N-dimension input.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id11">
<h2>池化层<a class="headerlink" href="#id11" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.AvgPool1d.html#mindspore.nn.AvgPool1d" title="mindspore.nn.AvgPool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.AvgPool1d</span></code></a></p></td>
<td><p>对输入的多维数据进行一维平面上的平均池化运算。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.AvgPool2d.html#mindspore.nn.AvgPool2d" title="mindspore.nn.AvgPool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.AvgPool2d</span></code></a></p></td>
<td><p>对输入的多维数据进行二维的平均池化运算。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MaxPool1d.html#mindspore.nn.MaxPool1d" title="mindspore.nn.MaxPool1d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MaxPool1d</span></code></a></p></td>
<td><p>对输入的多维数据进行一维平面上的最大池化运算。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MaxPool2d.html#mindspore.nn.MaxPool2d" title="mindspore.nn.MaxPool2d"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MaxPool2d</span></code></a></p></td>
<td><p>对输入的多维数据进行二维的最大池化运算。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id12">
<h2>量化<a class="headerlink" href="#id12" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ActQuant.html#mindspore.nn.ActQuant" title="mindspore.nn.ActQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ActQuant</span></code></a></p></td>
<td><p>Quantization aware training activation function.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2dBnAct.html#mindspore.nn.Conv2dBnAct" title="mindspore.nn.Conv2dBnAct"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2dBnAct</span></code></a></p></td>
<td><p>A combination of convolution, Batchnorm, and activation layer.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2dBnFoldQuant.html#mindspore.nn.Conv2dBnFoldQuant" title="mindspore.nn.Conv2dBnFoldQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2dBnFoldQuant</span></code></a></p></td>
<td><p>2D convolution with Batch Normalization operation folded construct.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2dBnFoldQuantOneConv.html#mindspore.nn.Conv2dBnFoldQuantOneConv" title="mindspore.nn.Conv2dBnFoldQuantOneConv"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2dBnFoldQuantOneConv</span></code></a></p></td>
<td><p>2D convolution which use the convolution layer statistics once to calculate Batch Normalization operation folded construct.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2dBnWithoutFoldQuant.html#mindspore.nn.Conv2dBnWithoutFoldQuant" title="mindspore.nn.Conv2dBnWithoutFoldQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2dBnWithoutFoldQuant</span></code></a></p></td>
<td><p>2D convolution and batchnorm without fold with fake quantized construct.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Conv2dQuant.html#mindspore.nn.Conv2dQuant" title="mindspore.nn.Conv2dQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Conv2dQuant</span></code></a></p></td>
<td><p>2D convolution with fake quantized operation layer.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.DenseBnAct.html#mindspore.nn.DenseBnAct" title="mindspore.nn.DenseBnAct"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.DenseBnAct</span></code></a></p></td>
<td><p>A combination of Dense, Batchnorm, and the activation layer.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.DenseQuant.html#mindspore.nn.DenseQuant" title="mindspore.nn.DenseQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.DenseQuant</span></code></a></p></td>
<td><p>The fully connected layer with fake quantized operation.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.FakeQuantWithMinMaxObserver.html#mindspore.nn.FakeQuantWithMinMaxObserver" title="mindspore.nn.FakeQuantWithMinMaxObserver"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.FakeQuantWithMinMaxObserver</span></code></a></p></td>
<td><p>Quantization aware operation which provides the fake quantization observer function on data with min and max.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MulQuant.html#mindspore.nn.MulQuant" title="mindspore.nn.MulQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MulQuant</span></code></a></p></td>
<td><p>Adds fake quantized operation after <cite>Mul</cite> operation.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.TensorAddQuant.html#mindspore.nn.TensorAddQuant" title="mindspore.nn.TensorAddQuant"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.TensorAddQuant</span></code></a></p></td>
<td><p>Adds fake quantized operation after TensorAdd operation.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id13">
<h2>损失函数<a class="headerlink" href="#id13" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.BCELoss.html#mindspore.nn.BCELoss" title="mindspore.nn.BCELoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.BCELoss</span></code></a></p></td>
<td><p>计算目标值和预测值之间的二值交叉熵损失值。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.BCEWithLogitsLoss.html#mindspore.nn.BCEWithLogitsLoss" title="mindspore.nn.BCEWithLogitsLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.BCEWithLogitsLoss</span></code></a></p></td>
<td><p>输入经过sigmoid激活函数后作为预测值，BCEWithLogitsLoss计算预测值和目标值之间的二值交叉熵损失。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.CosineEmbeddingLoss.html#mindspore.nn.CosineEmbeddingLoss" title="mindspore.nn.CosineEmbeddingLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.CosineEmbeddingLoss</span></code></a></p></td>
<td><p>CosineEmbeddingLoss creates a criterion to measure the similarity between two tensors using cosine distance.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.DiceLoss.html#mindspore.nn.DiceLoss" title="mindspore.nn.DiceLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.DiceLoss</span></code></a></p></td>
<td><p>Dice系数是一个集合相似性loss，用于计算两个样本之间的相似性。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.FocalLoss.html#mindspore.nn.FocalLoss" title="mindspore.nn.FocalLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.FocalLoss</span></code></a></p></td>
<td><p>FocalLoss函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.L1Loss.html#mindspore.nn.L1Loss" title="mindspore.nn.L1Loss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.L1Loss</span></code></a></p></td>
<td><p>L1Loss用于计算预测值和目标值之间的平均绝对误差。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.LossBase.html#mindspore.nn.LossBase" title="mindspore.nn.LossBase"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LossBase</span></code></a></p></td>
<td><p>构建损失函数的基类。</p></td>
<td><p></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MSELoss.html#mindspore.nn.MSELoss" title="mindspore.nn.MSELoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MSELoss</span></code></a></p></td>
<td><p>用于计算预测值与标签值之间的均方误差。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MultiClassDiceLoss.html#mindspore.nn.MultiClassDiceLoss" title="mindspore.nn.MultiClassDiceLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MultiClassDiceLoss</span></code></a></p></td>
<td><p>When there are multiple classifications, label is transformed into multiple binary classifications by one hot.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.RMSELoss.html#mindspore.nn.RMSELoss" title="mindspore.nn.RMSELoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.RMSELoss</span></code></a></p></td>
<td><p>RMSELoss用来测量 <span class="math notranslate nohighlight">\(x\)</span> 和 <span class="math notranslate nohighlight">\(y\)</span> 元素之间的均方根误差，其中 <span class="math notranslate nohighlight">\(x\)</span> 是输入Tensor， <span class="math notranslate nohighlight">\(y\)</span> 是目标值。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SampledSoftmaxLoss.html#mindspore.nn.SampledSoftmaxLoss" title="mindspore.nn.SampledSoftmaxLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SampledSoftmaxLoss</span></code></a></p></td>
<td><p>抽样交叉熵损失函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.SmoothL1Loss.html#mindspore.nn.SmoothL1Loss" title="mindspore.nn.SmoothL1Loss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SmoothL1Loss</span></code></a></p></td>
<td><p>SmoothL1损失函数，如果预测值和目标值的逐个元素绝对误差小于设定阈值 <cite>beta</cite> 则用平方项，否则用绝对误差项。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SoftMarginLoss.html#mindspore.nn.SoftMarginLoss" title="mindspore.nn.SoftMarginLoss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SoftMarginLoss</span></code></a></p></td>
<td><p>针对二分类问题的损失函数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.SoftmaxCrossEntropyWithLogits.html#mindspore.nn.SoftmaxCrossEntropyWithLogits" title="mindspore.nn.SoftmaxCrossEntropyWithLogits"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SoftmaxCrossEntropyWithLogits</span></code></a></p></td>
<td><p>计算预测值与真实值之间的交叉熵。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id14">
<h2>优化器<a class="headerlink" href="#id14" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Adagrad.html#mindspore.nn.Adagrad" title="mindspore.nn.Adagrad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Adagrad</span></code></a></p></td>
<td><p>Adagrad算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Adam.html#mindspore.nn.Adam" title="mindspore.nn.Adam"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Adam</span></code></a></p></td>
<td><p>Adaptive Moment Estimation (Adam)算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.AdamOffload.html#mindspore.nn.AdamOffload" title="mindspore.nn.AdamOffload"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.AdamOffload</span></code></a></p></td>
<td><p>此优化器在主机CPU上运行Adam优化算法，设备上仅执行网络参数的更新，最大限度地降低内存成本。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.AdamWeightDecay.html#mindspore.nn.AdamWeightDecay" title="mindspore.nn.AdamWeightDecay"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.AdamWeightDecay</span></code></a></p></td>
<td><p>权重衰减Adam算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.AdaSumByDeltaWeightWrapCell.html#mindspore.nn.AdaSumByDeltaWeightWrapCell" title="mindspore.nn.AdaSumByDeltaWeightWrapCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.AdaSumByDeltaWeightWrapCell</span></code></a></p></td>
<td><p></p></td>
<td><p></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.AdaSumByGradWrapCell.html#mindspore.nn.AdaSumByGradWrapCell" title="mindspore.nn.AdaSumByGradWrapCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.AdaSumByGradWrapCell</span></code></a></p></td>
<td><p></p></td>
<td><p></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ASGD.html#mindspore.nn.ASGD" title="mindspore.nn.ASGD"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ASGD</span></code></a></p></td>
<td><p>Implements Average Stochastic Gradient Descent.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.FTRL.html#mindspore.nn.FTRL" title="mindspore.nn.FTRL"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.FTRL</span></code></a></p></td>
<td><p>FTRL算法实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Lamb.html#mindspore.nn.Lamb" title="mindspore.nn.Lamb"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Lamb</span></code></a></p></td>
<td><p>LAMB（Layer-wise Adaptive Moments optimizer for Batching training，用于批训练的分层自适应矩优化器）算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.LARS.html#mindspore.nn.LARS" title="mindspore.nn.LARS"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LARS</span></code></a></p></td>
<td><p>LARS算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.LazyAdam.html#mindspore.nn.LazyAdam" title="mindspore.nn.LazyAdam"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.LazyAdam</span></code></a></p></td>
<td><p>Adaptive Moment Estimation (Adam)算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Momentum.html#mindspore.nn.Momentum" title="mindspore.nn.Momentum"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Momentum</span></code></a></p></td>
<td><p>Momentum算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Optimizer.html#mindspore.nn.Optimizer" title="mindspore.nn.Optimizer"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Optimizer</span></code></a></p></td>
<td><p>用于参数更新的优化器基类。</p></td>
<td><p></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ProximalAdagrad.html#mindspore.nn.ProximalAdagrad" title="mindspore.nn.ProximalAdagrad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ProximalAdagrad</span></code></a></p></td>
<td><p>ProximalAdagrad算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.RMSProp.html#mindspore.nn.RMSProp" title="mindspore.nn.RMSProp"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.RMSProp</span></code></a></p></td>
<td><p>均方根传播（RMSProp）算法的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Rprop.html#mindspore.nn.Rprop" title="mindspore.nn.Rprop"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Rprop</span></code></a></p></td>
<td><p>Implements Resilient backpropagation.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.SGD.html#mindspore.nn.SGD" title="mindspore.nn.SGD"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.SGD</span></code></a></p></td>
<td><p>随机梯度下降的实现。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.thor.html#mindspore.nn.thor" title="mindspore.nn.thor"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.thor</span></code></a></p></td>
<td><p>通过二阶算法THOR更新参数。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="wrapper">
<h2>Wrapper<a class="headerlink" href="#wrapper" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.DistributedGradReducer.html#mindspore.nn.DistributedGradReducer" title="mindspore.nn.DistributedGradReducer"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.DistributedGradReducer</span></code></a></p></td>
<td><p>分布式优化器。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.DynamicLossScaleUpdateCell.html#mindspore.nn.DynamicLossScaleUpdateCell" title="mindspore.nn.DynamicLossScaleUpdateCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.DynamicLossScaleUpdateCell</span></code></a></p></td>
<td><p>用于动态更新损失缩放系数(loss scale)的神经元。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.FixedLossScaleUpdateCell.html#mindspore.nn.FixedLossScaleUpdateCell" title="mindspore.nn.FixedLossScaleUpdateCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.FixedLossScaleUpdateCell</span></code></a></p></td>
<td><p>固定损失缩放系数的神经元。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ForwardValueAndGrad.html#mindspore.nn.ForwardValueAndGrad" title="mindspore.nn.ForwardValueAndGrad"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ForwardValueAndGrad</span></code></a></p></td>
<td><p>训练网络的封装。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.GetNextSingleOp.html#mindspore.nn.GetNextSingleOp" title="mindspore.nn.GetNextSingleOp"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.GetNextSingleOp</span></code></a></p></td>
<td><p>用于获取下一条数据的Cell。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MicroBatchInterleaved.html#mindspore.nn.MicroBatchInterleaved" title="mindspore.nn.MicroBatchInterleaved"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MicroBatchInterleaved</span></code></a></p></td>
<td><p>Wrap the network with Batch Size.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ParameterUpdate.html#mindspore.nn.ParameterUpdate" title="mindspore.nn.ParameterUpdate"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ParameterUpdate</span></code></a></p></td>
<td><p>更新参数的Cell。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.PipelineCell.html#mindspore.nn.PipelineCell" title="mindspore.nn.PipelineCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.PipelineCell</span></code></a></p></td>
<td><p>将MiniBatch切分成更细粒度的MicroBatch，用于流水线并行的训练中。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.TimeDistributed.html#mindspore.nn.TimeDistributed" title="mindspore.nn.TimeDistributed"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.TimeDistributed</span></code></a></p></td>
<td><p>The time distributed layer.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.TrainOneStepCell.html#mindspore.nn.TrainOneStepCell" title="mindspore.nn.TrainOneStepCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.TrainOneStepCell</span></code></a></p></td>
<td><p>训练网络封装类。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.TrainOneStepWithLossScaleCell.html#mindspore.nn.TrainOneStepWithLossScaleCell" title="mindspore.nn.TrainOneStepWithLossScaleCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.TrainOneStepWithLossScaleCell</span></code></a></p></td>
<td><p>使用混合精度功能的训练网络。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.WithEvalCell.html#mindspore.nn.WithEvalCell" title="mindspore.nn.WithEvalCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.WithEvalCell</span></code></a></p></td>
<td><p>封装前向网络和损失函数，返回用于计算评估指标的损失函数值、前向输出和标签。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.WithGradCell.html#mindspore.nn.WithGradCell" title="mindspore.nn.WithGradCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.WithGradCell</span></code></a></p></td>
<td><p>Cell that returns the gradients.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.WithLossCell.html#mindspore.nn.WithLossCell" title="mindspore.nn.WithLossCell"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.WithLossCell</span></code></a></p></td>
<td><p>包含损失函数的Cell。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id15">
<h2>数学运算<a class="headerlink" href="#id15" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MatMul.html#mindspore.nn.MatMul" title="mindspore.nn.MatMul"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MatMul</span></code></a></p></td>
<td><p>The nn.MatMul interface is deprecated, please use the <a class="reference internal" href="ops/mindspore.ops.matmul.html#mindspore.ops.matmul" title="mindspore.ops.matmul"><code class="xref py py-class docutils literal notranslate"><span class="pre">mindspore.ops.matmul</span></code></a> instead.</p></td>
<td><p>deprecated</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Moments.html#mindspore.nn.Moments" title="mindspore.nn.Moments"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Moments</span></code></a></p></td>
<td><p>沿指定轴 <cite>axis</cite> 计算输入 <cite>x</cite> 的均值和方差。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ReduceLogSumExp.html#mindspore.nn.ReduceLogSumExp" title="mindspore.nn.ReduceLogSumExp"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ReduceLogSumExp</span></code></a></p></td>
<td><p>Reduces a dimension of a tensor by calculating exponential for all elements in the dimension, then calculate logarithm of the sum.</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id16">
<h2>评估指标<a class="headerlink" href="#id16" title="Permalink to this headline">¶</a></h2>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 90%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Accuracy.html#mindspore.nn.Accuracy" title="mindspore.nn.Accuracy"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Accuracy</span></code></a></p></td>
<td><p>计算数据分类的正确率，包括二分类和多分类。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.auc.html#mindspore.nn.auc" title="mindspore.nn.auc"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.auc</span></code></a></p></td>
<td><p>使用梯形规则计算曲线下面积（Area Under the Curve，AUC）。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.BleuScore.html#mindspore.nn.BleuScore" title="mindspore.nn.BleuScore"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.BleuScore</span></code></a></p></td>
<td><p>计算具有一个或多个引用的机器翻译文本的BLEU分数。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ConfusionMatrix.html#mindspore.nn.ConfusionMatrix" title="mindspore.nn.ConfusionMatrix"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ConfusionMatrix</span></code></a></p></td>
<td><p>计算混淆矩阵(confusion matrix)，通常用于评估分类模型的性能，包括二分类和多分类场景。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.ConfusionMatrixMetric.html#mindspore.nn.ConfusionMatrixMetric" title="mindspore.nn.ConfusionMatrixMetric"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ConfusionMatrixMetric</span></code></a></p></td>
<td><p>计算与混淆矩阵相关的度量指标，用于评估输出为二分类或多分类的模型性能。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.CosineSimilarity.html#mindspore.nn.CosineSimilarity" title="mindspore.nn.CosineSimilarity"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.CosineSimilarity</span></code></a></p></td>
<td><p>计算表示相似性。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Dice.html#mindspore.nn.Dice" title="mindspore.nn.Dice"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Dice</span></code></a></p></td>
<td><p>集合相似性度量。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.F1.html#mindspore.nn.F1" title="mindspore.nn.F1"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.F1</span></code></a></p></td>
<td><p>计算F1 score。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Fbeta.html#mindspore.nn.Fbeta" title="mindspore.nn.Fbeta"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Fbeta</span></code></a></p></td>
<td><p>计算Fbeta评分。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.HausdorffDistance.html#mindspore.nn.HausdorffDistance" title="mindspore.nn.HausdorffDistance"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.HausdorffDistance</span></code></a></p></td>
<td><p>计算Hausdorff距离。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.get_metric_fn.html#mindspore.nn.get_metric_fn" title="mindspore.nn.get_metric_fn"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.get_metric_fn</span></code></a></p></td>
<td><p>根据输入的 <cite>name</cite> 获取metric的方法。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Loss.html#mindspore.nn.Loss" title="mindspore.nn.Loss"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Loss</span></code></a></p></td>
<td><p>计算loss的平均值。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.MAE.html#mindspore.nn.MAE" title="mindspore.nn.MAE"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MAE</span></code></a></p></td>
<td><p>计算平均绝对误差MAE（Mean Absolute Error）。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MeanSurfaceDistance.html#mindspore.nn.MeanSurfaceDistance" title="mindspore.nn.MeanSurfaceDistance"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MeanSurfaceDistance</span></code></a></p></td>
<td><p>计算从 <cite>y_pred</cite> 到 <cite>y</cite> 的平均表面距离。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Metric.html#mindspore.nn.Metric" title="mindspore.nn.Metric"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Metric</span></code></a></p></td>
<td><p>用于计算评估指标的基类。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.MSE.html#mindspore.nn.MSE" title="mindspore.nn.MSE"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.MSE</span></code></a></p></td>
<td><p>测量均方差MSE（Mean Squared Error）。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.names.html#mindspore.nn.names" title="mindspore.nn.names"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.names</span></code></a></p></td>
<td><p>获取所有metric的名称。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.OcclusionSensitivity.html#mindspore.nn.OcclusionSensitivity" title="mindspore.nn.OcclusionSensitivity"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.OcclusionSensitivity</span></code></a></p></td>
<td><p>用于计算神经网络对给定图像的遮挡灵敏度（Occlusion Sensitivity），表示了图像的哪些部分对神经网络的分类决策最重要。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Perplexity.html#mindspore.nn.Perplexity" title="mindspore.nn.Perplexity"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Perplexity</span></code></a></p></td>
<td><p>计算困惑度（perplexity）。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Precision.html#mindspore.nn.Precision" title="mindspore.nn.Precision"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Precision</span></code></a></p></td>
<td><p>计算数据分类的精度，包括单标签场景和多标签场景。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Recall.html#mindspore.nn.Recall" title="mindspore.nn.Recall"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Recall</span></code></a></p></td>
<td><p>计算数据分类的召回率，包括单标签场景和多标签场景。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ROC.html#mindspore.nn.ROC" title="mindspore.nn.ROC"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ROC</span></code></a></p></td>
<td><p>计算ROC曲线。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.RootMeanSquareDistance.html#mindspore.nn.RootMeanSquareDistance" title="mindspore.nn.RootMeanSquareDistance"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.RootMeanSquareDistance</span></code></a></p></td>
<td><p>计算从 <cite>y_pred</cite> 到 <cite>y</cite> 的均方根表面距离。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.rearrange_inputs.html#mindspore.nn.rearrange_inputs" title="mindspore.nn.rearrange_inputs"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.rearrange_inputs</span></code></a></p></td>
<td><p>此装饰器用于根据类的 <cite>indexes</cite> 属性对输入重新排列。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.Top1CategoricalAccuracy.html#mindspore.nn.Top1CategoricalAccuracy" title="mindspore.nn.Top1CategoricalAccuracy"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Top1CategoricalAccuracy</span></code></a></p></td>
<td><p>计算top-1分类正确率。</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.Top5CategoricalAccuracy.html#mindspore.nn.Top5CategoricalAccuracy" title="mindspore.nn.Top5CategoricalAccuracy"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.Top5CategoricalAccuracy</span></code></a></p></td>
<td><p>计算top-5分类正确率。</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.TopKCategoricalAccuracy.html#mindspore.nn.TopKCategoricalAccuracy" title="mindspore.nn.TopKCategoricalAccuracy"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.TopKCategoricalAccuracy</span></code></a></p></td>
<td><p>计算top-k分类正确率。</p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="id17">
<h2>动态学习率<a class="headerlink" href="#id17" title="Permalink to this headline">¶</a></h2>
<div class="section" id="learningrateschedule">
<h3>LearningRateSchedule类<a class="headerlink" href="#learningrateschedule" title="Permalink to this headline">¶</a></h3>
<p>本模块中的动态学习率都是LearningRateSchedule的子类，将LearningRateSchedule的实例传递给优化器。在训练过程中，优化器以当前step为输入调用该实例，得到当前的学习率。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">mindspore.nn</span> <span class="k">as</span> <span class="nn">nn</span>

<span class="n">min_lr</span> <span class="o">=</span> <span class="mf">0.01</span>
<span class="n">max_lr</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="n">decay_steps</span> <span class="o">=</span> <span class="mi">4</span>
<span class="n">cosine_decay_lr</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">CosineDecayLR</span><span class="p">(</span><span class="n">min_lr</span><span class="p">,</span> <span class="n">max_lr</span><span class="p">,</span> <span class="n">decay_steps</span><span class="p">)</span>

<span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="n">cosine_decay_lr</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
</pre></div>
</div>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p>接口名</p></td>
<td><p>概述</p></td>
<td><p>支持平台</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.CosineDecayLR.html#mindspore.nn.CosineDecayLR" title="mindspore.nn.CosineDecayLR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.CosineDecayLR</span></code></a></p></td>
<td><p>基于余弦衰减函数计算学习率。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.ExponentialDecayLR.html#mindspore.nn.ExponentialDecayLR" title="mindspore.nn.ExponentialDecayLR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.ExponentialDecayLR</span></code></a></p></td>
<td><p>基于指数衰减函数计算学习率。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.InverseDecayLR.html#mindspore.nn.InverseDecayLR" title="mindspore.nn.InverseDecayLR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.InverseDecayLR</span></code></a></p></td>
<td><p>基于逆时衰减函数计算学习率。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.NaturalExpDecayLR.html#mindspore.nn.NaturalExpDecayLR" title="mindspore.nn.NaturalExpDecayLR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.NaturalExpDecayLR</span></code></a></p></td>
<td><p>基于自然指数衰减函数计算学习率。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.PolynomialDecayLR.html#mindspore.nn.PolynomialDecayLR" title="mindspore.nn.PolynomialDecayLR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.PolynomialDecayLR</span></code></a></p></td>
<td><p>基于多项式衰减函数计算学习率。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.WarmUpLR.html#mindspore.nn.WarmUpLR" title="mindspore.nn.WarmUpLR"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.WarmUpLR</span></code></a></p></td>
<td><p>预热学习率。</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p></td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="dynamic-lr">
<h3>Dynamic LR函数<a class="headerlink" href="#dynamic-lr" title="Permalink to this headline">¶</a></h3>
<p>本模块中的动态学习率都是function，调用function并将结果传递给优化器。在训练过程中，优化器将result[current step]作为当前学习率。</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">mindspore.nn</span> <span class="k">as</span> <span class="nn">nn</span>

<span class="n">min_lr</span> <span class="o">=</span> <span class="mf">0.01</span>
<span class="n">max_lr</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="n">total_step</span> <span class="o">=</span> <span class="mi">6</span>
<span class="n">step_per_epoch</span> <span class="o">=</span> <span class="mi">1</span>
<span class="n">decay_epoch</span> <span class="o">=</span> <span class="mi">4</span>

<span class="n">lr</span><span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">cosine_decay_lr</span><span class="p">(</span><span class="n">min_lr</span><span class="p">,</span> <span class="n">max_lr</span><span class="p">,</span> <span class="n">total_step</span><span class="p">,</span> <span class="n">step_per_epoch</span><span class="p">,</span> <span class="n">decay_epoch</span><span class="p">)</span>

<span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
</pre></div>
</div>
<table class="longtable docutils align-default">
<colgroup>
<col style="width: 10%" />
<col style="width: 60%" />
<col style="width: 30%" />
</colgroup>
<thead>
<tr class="row-odd"><th scope="col"><p>接口名</p></th>
<th scope="col"><p>概述</p></th>
<th scope="col"><p>支持平台</p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.cosine_decay_lr.html#mindspore.nn.cosine_decay_lr" title="mindspore.nn.cosine_decay_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.cosine_decay_lr</span></code></a></p></td>
<td><p>基于余弦衰减函数计算学习率。</p></td>
<td><p></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.exponential_decay_lr.html#mindspore.nn.exponential_decay_lr" title="mindspore.nn.exponential_decay_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.exponential_decay_lr</span></code></a></p></td>
<td><p>基于指数衰减函数计算学习率。</p></td>
<td><p></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.inverse_decay_lr.html#mindspore.nn.inverse_decay_lr" title="mindspore.nn.inverse_decay_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.inverse_decay_lr</span></code></a></p></td>
<td><p>基于逆时衰减函数计算学习率。</p></td>
<td><p></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.natural_exp_decay_lr.html#mindspore.nn.natural_exp_decay_lr" title="mindspore.nn.natural_exp_decay_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.natural_exp_decay_lr</span></code></a></p></td>
<td><p>基于自然指数衰减函数计算学习率。</p></td>
<td><p></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.piecewise_constant_lr.html#mindspore.nn.piecewise_constant_lr" title="mindspore.nn.piecewise_constant_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.piecewise_constant_lr</span></code></a></p></td>
<td><p>获取分段常量学习率。</p></td>
<td><p></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="nn/mindspore.nn.polynomial_decay_lr.html#mindspore.nn.polynomial_decay_lr" title="mindspore.nn.polynomial_decay_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.polynomial_decay_lr</span></code></a></p></td>
<td><p>基于多项式衰减函数计算学习率。</p></td>
<td><p></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="nn/mindspore.nn.warmup_lr.html#mindspore.nn.warmup_lr" title="mindspore.nn.warmup_lr"><code class="xref py py-obj docutils literal notranslate"><span class="pre">mindspore.nn.warmup_lr</span></code></a></p></td>
<td><p>预热学习率。</p></td>
<td><p></p></td>
</tr>
</tbody>
</table>
</div>
</div>
</div>


           </div>
           
          </div>
          <footer>
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
        <a href="nn/mindspore.nn.Cell.html" class="btn btn-neutral float-right" title="mindspore.nn.Cell" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
        <a href="mindspore.mindrecord.html" class="btn btn-neutral float-left" title="mindspore.mindrecord" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
    </div>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>