

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>mindspore.nn.loss.loss &mdash; MindSpore master documentation</title>

  <!-- Theme and pygments highlight styles; type omitted (text/css is the default) -->
  <link rel="stylesheet" href="../../../../_static/css/theme.css">
  <link rel="stylesheet" href="../../../../_static/pygments.css">

  <!-- HTML5 element support for legacy IE < 9 -->
  <!--[if lt IE 9]>
    <script src="../../../../_static/js/html5shiv.min.js"></script>
  <![endif]-->

  <!-- Sphinx runtime scripts: loaded synchronously in this order because
       doctools.js depends on jquery.js/underscore.js being present first. -->
  <script id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
  <script src="../../../../_static/jquery.js"></script>
  <script src="../../../../_static/underscore.js"></script>
  <script src="../../../../_static/doctools.js"></script>
  <script src="../../../../_static/language_data.js"></script>
  <!-- MathJax is independent of the scripts above, so it may load asynchronously.
       "async" is a boolean attribute: presence alone enables it. -->
  <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  <script src="../../../../_static/js/theme.js"></script>

  <link rel="index" title="Index" href="../../../../genindex.html">
  <link rel="search" title="Search" href="../../../../search.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          <!-- Project home link shown above the search box -->
          <a href="../../../../index.html" class="icon icon-home"> MindSpore</a>

          <!-- Full-text search form; "q" is the query parameter read by search.html.
               aria-label gives the text input an accessible name — placeholder alone
               is not a label for assistive technology. -->
          <div role="search">
            <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
              <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
              <input type="hidden" name="check_keywords" value="yes">
              <input type="hidden" name="area" value="default">
            </form>
          </div>
        </div>

        <!-- Table of contents generated by Sphinx; the theme's JS drives expansion,
             so the wy-menu classes and data-spy attribute must stay intact. -->
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.html">mindspore</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.common.initializer.html">mindspore.common.initializer</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.communication.html">mindspore.communication</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.compression.html">mindspore.compression</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.context.html">mindspore.context</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.html">mindspore.dataset</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.config.html">mindspore.dataset.config</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.text.html">mindspore.dataset.text</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.mindrecord.html">mindspore.mindrecord</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.html">mindspore.nn</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.probability.html">mindspore.nn.probability</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.numpy.html">mindspore.numpy</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.ops.html">mindspore.ops</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.html">mindspore.parallel</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.profiler.html">mindspore.profiler</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.scipy.html">mindspore.scipy</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.train.html">mindspore.train</a></li>
            <li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.boost.html">mindspore.boost</a></li>
          </ul>
          <p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
          <ul>
            <li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
          </ul>
        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <!-- Mobile top bar: the theme's JS toggles the side nav via data-toggle="wy-nav-top". -->
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <!-- NOTE(review): the bare <i> acts as a click target for the menu toggle but has
               no accessible name or keyboard affordance — confirm whether the theme's JS
               supplies these, otherwise a <button> would be the correct element here. -->
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../../../../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    <!-- Icon-only home link: aria-label supplies the accessible name the empty
         anchor text otherwise lacks. -->
    <li><a href="../../../../index.html" class="icon icon-home" aria-label="Home"></a> &raquo;</li>
    <li><a href="../../../index.html">Module code</a> &raquo;</li>
    <li>mindspore.nn.loss.loss</li>
    <!-- Empty aside slot emitted by the theme template; kept as generated so the
         breadcrumb layout CSS is unaffected. -->
    <li class="wy-breadcrumbs-aside">
    </li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <h1>Source code for mindspore.nn.loss.loss</h1><div class="highlight"><pre>
<span></span><span class="c1"># Copyright 2020-2021 Huawei Technologies Co., Ltd</span>
<span class="c1">#</span>
<span class="c1"># Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);</span>
<span class="c1"># you may not use this file except in compliance with the License.</span>
<span class="c1"># You may obtain a copy of the License at</span>
<span class="c1">#</span>
<span class="c1"># http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="c1">#</span>
<span class="c1"># Unless required by applicable law or agreed to in writing, software</span>
<span class="c1"># distributed under the License is distributed on an &quot;AS IS&quot; BASIS,</span>
<span class="c1"># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="c1"># See the License for the specific language governing permissions and</span>
<span class="c1"># limitations under the License.</span>
<span class="c1"># ============================================================================</span>
<span class="sd">&quot;&quot;&quot;loss&quot;&quot;&quot;</span>
<span class="kn">import</span> <span class="nn">mindspore</span>
<span class="kn">import</span> <span class="nn">mindspore.common.dtype</span> <span class="k">as</span> <span class="nn">mstype</span>
<span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">log</span>
<span class="kn">from</span> <span class="nn">mindspore.common.tensor</span> <span class="kn">import</span> <span class="n">Tensor</span>
<span class="kn">from</span> <span class="nn">mindspore.common.parameter</span> <span class="kn">import</span> <span class="n">Parameter</span>
<span class="kn">from</span> <span class="nn">mindspore.ops</span> <span class="kn">import</span> <span class="n">operations</span> <span class="k">as</span> <span class="n">P</span>
<span class="kn">from</span> <span class="nn">mindspore.ops</span> <span class="kn">import</span> <span class="n">functional</span> <span class="k">as</span> <span class="n">F</span>
<span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">nn</span>
<span class="kn">from</span> <span class="nn">mindspore.ops.primitive</span> <span class="kn">import</span> <span class="n">constexpr</span>
<span class="kn">from</span> <span class="nn">mindspore.nn.cell</span> <span class="kn">import</span> <span class="n">Cell</span>
<span class="kn">from</span> <span class="nn">mindspore.nn.layer.activation</span> <span class="kn">import</span> <span class="n">get_activation</span>
<span class="kn">from</span> <span class="nn">mindspore._checkparam</span> <span class="kn">import</span> <span class="n">Validator</span> <span class="k">as</span> <span class="n">validator</span>
<span class="kn">from</span> <span class="nn">mindspore._checkparam</span> <span class="kn">import</span> <span class="n">Rel</span>
<span class="kn">from</span> <span class="nn">...</span> <span class="kn">import</span> <span class="n">context</span>


<span class="k">class</span> <span class="nc">LossBase</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Base class for other losses.</span>

<span class="sd">    Other losses derived from this should implement their own `construct` and use method `self.get_loss`</span>
<span class="sd">    to apply reduction to loss values.</span>

<span class="sd">    Args:</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">            Default: &quot;mean&quot;.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Loss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">LossBase</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>

        <span class="k">if</span> <span class="n">reduction</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">(</span><span class="s1">&#39;mean&#39;</span><span class="p">,</span> <span class="s1">&#39;sum&#39;</span><span class="p">,</span> <span class="s1">&#39;none&#39;</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;reduction&#39; should be in [&#39;mean&#39;, &#39;sum&#39;, &#39;none&#39;], &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">reduction</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">average</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduce</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="k">if</span> <span class="n">reduction</span> <span class="o">==</span> <span class="s1">&#39;sum&#39;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">average</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="k">if</span> <span class="n">reduction</span> <span class="o">==</span> <span class="s1">&#39;none&#39;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">reduce</span> <span class="o">=</span> <span class="kc">False</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">reduce_mean</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ReduceMean</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ReduceSum</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mul</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Mul</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cast</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Cast</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">get_axis</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Get a range of axis for input.</span>

<span class="sd">        Args:</span>
<span class="sd">            x (Tensor): Tensor of any shape.</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; class Net(nn.LossBase):</span>
<span class="sd">            ...     def __init__(self, reduction=&#39;mean&#39;):</span>
<span class="sd">            ...         super(Net, self).__init__(reduction)</span>
<span class="sd">            ...         self.abs = ops.Abs()</span>
<span class="sd">            ...</span>
<span class="sd">            ...     def construct(self, logits, labels):</span>
<span class="sd">            ...         x = self.abs(logits - labels)</span>
<span class="sd">            ...         axis = self.get_axis(x)</span>
<span class="sd">            ...         return axis</span>
<span class="sd">            &gt;&gt;&gt; net = Net()</span>
<span class="sd">            &gt;&gt;&gt; # Case 1: logits.shape = labels.shape = (3,)</span>
<span class="sd">            &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; labels = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; output = net(logits, labels)</span>
<span class="sd">            &gt;&gt;&gt; print(output)</span>
<span class="sd">            (0,)</span>
<span class="sd">            &gt;&gt;&gt; # Case 2: logits.shape = labels.shape = (3, 3)</span>
<span class="sd">            &gt;&gt;&gt; logits = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; labels = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; output = net(logits, labels)</span>
<span class="sd">            &gt;&gt;&gt; print(output)</span>
<span class="sd">            (0, 1)</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">shape</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="n">length</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">tuple_len</span><span class="p">(</span><span class="n">shape</span><span class="p">)</span>
        <span class="n">perm</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">make_range</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">length</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">perm</span>

    <span class="k">def</span> <span class="nf">get_loss</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">weights</span><span class="o">=</span><span class="mf">1.0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Computes the weighted loss.</span>

<span class="sd">        Args:</span>
<span class="sd">            x (Tensor): Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">                additional dimensions.</span>
<span class="sd">            weights (Union[float, Tensor]): Optional `Tensor` whose rank is either 0, or the same rank as inputs,</span>
<span class="sd">                and must be broadcastable to inputs (i.e., all dimensions must be either `1`,</span>
<span class="sd">                or the same as the corresponding inputs dimension). Default: 1.0.</span>

<span class="sd">        Examples:</span>
<span class="sd">            &gt;&gt;&gt; class Net(nn.LossBase):</span>
<span class="sd">            ...     def __init__(self, reduction=&#39;mean&#39;):</span>
<span class="sd">            ...         super(Net, self).__init__(reduction)</span>
<span class="sd">            ...         self.abs = ops.Abs()</span>
<span class="sd">            ...</span>
<span class="sd">            ...     def construct(self, logits, labels):</span>
<span class="sd">            ...         x = self.abs(logits - labels)</span>
<span class="sd">            ...         output = self.get_loss(x)</span>
<span class="sd">            ...         return output</span>
<span class="sd">            &gt;&gt;&gt; net = Net()</span>
<span class="sd">            &gt;&gt;&gt; # Case 1: logits.shape = labels.shape = (3,)</span>
<span class="sd">            &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; labels = Tensor(np.array([1, 2, 2]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; output = net(logits, labels)</span>
<span class="sd">            &gt;&gt;&gt; print(output)</span>
<span class="sd">            0.33333334</span>
<span class="sd">            &gt;&gt;&gt; # Case 2: logits.shape = labels.shape = (3, 3)</span>
<span class="sd">            &gt;&gt;&gt; logits = Tensor(np.array([[1, 2, 3],[1, 2, 3],[1, 2, 3]]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; labels = Tensor(np.array([[1, 2, 2],[1, 2, 3],[1, 2, 3]]), mindspore.float32)</span>
<span class="sd">            &gt;&gt;&gt; output = net(logits, labels)</span>
<span class="sd">            &gt;&gt;&gt; print(output)</span>
<span class="sd">            0.11111111</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">input_dtype</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">dtype</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="n">weights</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">mul</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">x</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">average</span><span class="p">:</span>
            <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_mean</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_axis</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">average</span><span class="p">:</span>
            <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_axis</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">input_dtype</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">NotImplementedError</span>


<span class="k">class</span> <span class="nc">_Loss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Base class for other losses.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize _Loss.&quot;&quot;&quot;</span>
        <span class="n">log</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span><span class="s2">&quot;&#39;_Loss&#39; is deprecated from version 1.3 and &quot;</span>
                    <span class="s2">&quot;will be removed in a future version, use &#39;LossBase&#39; instead.&quot;</span><span class="p">)</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">_Loss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">NotImplementedError</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_is_tensor</span><span class="p">(</span><span class="n">param_name</span><span class="p">,</span> <span class="n">input_data</span><span class="p">,</span> <span class="n">cls_name</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function, used to check whether the input data is Tensor.&quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">input_data</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">typeof</span><span class="p">(</span><span class="n">input_data</span><span class="p">),</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;</span><span class="si">{</span><span class="n">param_name</span><span class="si">}</span><span class="s2">&#39; should be &#39;</span><span class="si">{</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="si">}</span><span class="s2">&#39;, &quot;</span>
                        <span class="sa">f</span><span class="s2">&quot;but got &#39;</span><span class="si">{</span><span class="n">F</span><span class="o">.</span><span class="n">typeof</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span><span class="si">}</span><span class="s2">&#39;&quot;</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">L1Loss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    L1Loss creates a criterion to measure the mean absolute error (MAE) between :math:`x` and :math:`y` element-wise,</span>
<span class="sd">    where :math:`x` is the input Tensor and :math:`y` is the labels Tensor.</span>

<span class="sd">    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,</span>
<span class="sd">    the unreduced loss (i.e. with argument reduction set to &#39;none&#39;) of :math:`x` and :math:`y` is given as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,</span>

<span class="sd">    where :math:`N` is the batch size. If `reduction` is not &#39;none&#39;, then:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">            \operatorname{mean}(L), &amp; \text{if reduction} = \text{&#39;mean&#39;;}\\</span>
<span class="sd">            \operatorname{sum}(L),  &amp; \text{if reduction} = \text{&#39;sum&#39;.}</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">            Default: &quot;mean&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">          additional dimensions.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, *)`, same shape as the `logits` in common cases.</span>
<span class="sd">          However, it supports the shape of `logits` is different from the shape of `labels`</span>
<span class="sd">          and they should be broadcasted to each other.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, loss float tensor, the shape is zero if `reduction` is &#39;mean&#39; or &#39;sum&#39;,</span>
<span class="sd">        while the shape of output is the broadcasted shape if `reduction` is &#39;none&#39;.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # Case 1: logits.shape = labels.shape = (3,)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.L1Loss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([1, 2, 2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.33333334</span>
<span class="sd">        &gt;&gt;&gt; # Case 2: logits.shape = (3,), labels.shape = (2, 3)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.L1Loss(reduction=&#39;none&#39;)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1. 2.]</span>
<span class="sd">         [0. 0. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize L1Loss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">L1Loss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">abs</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Abs</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">abs</span><span class="p">(</span><span class="n">logits</span> <span class="o">-</span> <span class="n">labels</span><span class="p">)</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_loss</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">MSELoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    MSELoss creates a criterion to measure the mean squared error (squared L2-norm) between :math:`x` and :math:`y`</span>
<span class="sd">    element-wise, where :math:`x` is the input and :math:`y` is the labels.</span>

<span class="sd">    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,</span>
<span class="sd">    the unreduced loss (i.e. with argument reduction set to &#39;none&#39;) of :math:`x` and :math:`y` is given as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with} \quad l_n = (x_n - y_n)^2.</span>

<span class="sd">    where :math:`N` is the batch size. If `reduction` is not &#39;none&#39;, then:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">            \operatorname{mean}(L), &amp; \text{if reduction} = \text{&#39;mean&#39;;}\\</span>
<span class="sd">            \operatorname{sum}(L),  &amp; \text{if reduction} = \text{&#39;sum&#39;.}</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">            Default: &quot;mean&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">          additional dimensions.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, *)`, same shape as the `logits` in common cases.</span>
<span class="sd">          However, it supports the shape of `logits` is different from the shape of `labels`</span>
<span class="sd">          and they should be broadcasted to each other.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, loss float tensor, the shape is zero if `reduction` is &#39;mean&#39; or &#39;sum&#39;,</span>
<span class="sd">        while the shape of output is the broadcasted shape if `reduction` is &#39;none&#39;.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # Case 1: logits.shape = labels.shape = (3,)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.MSELoss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([1, 1, 1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        1.6666667</span>
<span class="sd">        &gt;&gt;&gt; # Case 2: logits.shape = (3,), labels.shape = (2, 3)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.MSELoss(reduction=&#39;none&#39;)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1. 4.]</span>
<span class="sd">         [0. 0. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">square</span><span class="p">(</span><span class="n">logits</span> <span class="o">-</span> <span class="n">labels</span><span class="p">)</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_loss</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">RMSELoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    RMSELoss creates a criterion to measure the root mean square error between :math:`x` and :math:`y`</span>
<span class="sd">    element-wise, where :math:`x` is the input and :math:`y` is the labels.</span>

<span class="sd">    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,</span>
<span class="sd">    the loss of :math:`x` and :math:`y` is given as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        loss = \sqrt{\frac{1}{N}\sum_{i=1}^{N}{(x_i-y_i)^2}}</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">          additional dimensions.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, *)`, same shape as the `logits` in common cases.</span>
<span class="sd">          However, it supports the shape of `logits` is different from the shape of `labels`</span>
<span class="sd">          and they should be broadcasted to each other.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, weighted loss float tensor and its shape is zero.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # Case 1: logits.shape = labels.shape = (3,)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.RMSELoss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([1, 2, 2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.57735026</span>
<span class="sd">        &gt;&gt;&gt; # Case 2: logits.shape = (3,), labels.shape = (2, 3)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.RMSELoss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        1.0</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize RMSELoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">RMSELoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">MSELoss</span> <span class="o">=</span> <span class="n">MSELoss</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">label</span><span class="p">):</span>
        <span class="n">rmse_loss</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">MSELoss</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">label</span><span class="p">))</span>

        <span class="k">return</span> <span class="n">rmse_loss</span>


<span class="k">class</span> <span class="nc">MAELoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    MAELoss creates a criterion to measure the average absolute error between :math:`x` and :math:`y`</span>
<span class="sd">    element-wise, where :math:`x` is the input and :math:`y` is the labels.</span>

<span class="sd">    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,</span>
<span class="sd">    the unreduced loss (i.e. with argument reduction set to &#39;none&#39;) of :math:`x` and :math:`y` is given as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,</span>

<span class="sd">    where :math:`N` is the batch size. If `reduction` is not &#39;none&#39;, then:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">            \operatorname{mean}(L), &amp; \text{if reduction} = \text{&#39;mean&#39;;}\\</span>
<span class="sd">            \operatorname{sum}(L),  &amp; \text{if reduction} = \text{&#39;sum&#39;.}</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">                         Default: &quot;mean&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">          additional dimensions.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, *)`, same shape as the `logits` in common cases.</span>
<span class="sd">          However, it supports the shape of `logits` is different from the shape of `labels`</span>
<span class="sd">          and they should be broadcasted to each other.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, weighted loss float tensor, the shape is zero if `reduction` is &#39;mean&#39; or &#39;sum&#39;,</span>
<span class="sd">        while the shape of output is the broadcasted shape if `reduction` is &#39;none&#39;.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # Case 1: logits.shape = labels.shape = (3,)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.MAELoss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([1, 2, 2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.33333334</span>
<span class="sd">        &gt;&gt;&gt; # Case 2: logits.shape = (3,), labels.shape = (2, 3)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.MAELoss(reduction=&#39;none&#39;)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1. 2.]</span>
<span class="sd">         [0. 0. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize MAELoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MAELoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">abs</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Abs</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">label</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">label</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">abs</span><span class="p">(</span><span class="n">logits</span> <span class="o">-</span> <span class="n">label</span><span class="p">)</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_loss</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">SmoothL1Loss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A loss class for learning region proposals.</span>

<span class="sd">    SmoothL1Loss can be regarded as modified version of L1Loss or a combination of L1Loss and L2Loss.</span>
<span class="sd">    L1Loss computes the element-wise absolute difference between two input tensors while L2Loss computes the</span>
<span class="sd">    squared difference between two input tensors. L2Loss often leads to faster convergence but it is less</span>
<span class="sd">    robust to outliers.</span>

<span class="sd">    Given two input :math:`x,\  y` of length :math:`N`, the unreduced SmoothL1Loss can be described</span>
<span class="sd">    as follows:</span>

<span class="sd">    .. math::</span>
<span class="sd">        L_{i} =</span>
<span class="sd">        \begin{cases}</span>
<span class="sd">        \frac{0.5 (x_i - y_i)^{2}}{\text{beta}}, &amp; \text{if } |x_i - y_i| &lt; \text{beta} \\</span>
<span class="sd">        |x_i - y_i| - 0.5 \text{beta}, &amp; \text{otherwise. }</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Here :math:`\text{beta}` controls the point where the loss function changes from quadratic to linear.</span>
<span class="sd">    Its default value is 1.0. :math:`N` is the batch size. This function returns an</span>
<span class="sd">    unreduced loss tensor.</span>

<span class="sd">    Args:</span>
<span class="sd">        beta (float): A parameter used to control the point where the function will change from</span>
<span class="sd">            quadratic to linear. Default: 1.0.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">          additional dimensions. Data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - Ground truth data, tensor of shape :math:`(N, *)`,</span>
<span class="sd">          same shape and dtype as the `logits`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, loss float tensor, same shape and dtype as the `logits`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `beta` is not a float.</span>
<span class="sd">        TypeError: If dtype of `logits` or `labels` is neither float16 nor float32.</span>
<span class="sd">        ValueError: If `beta` is less than or equal to 0.</span>
<span class="sd">        ValueError: If shape of `logits` is not the same as `labels`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.SmoothL1Loss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([1, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([1, 2, 2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [0.  0.  0.5]</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">beta</span><span class="o">=</span><span class="mf">1.0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SmoothL1Loss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">SmoothL1Loss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">beta</span> <span class="o">=</span> <span class="n">beta</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">smooth_l1_loss</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">SmoothL1Loss</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">smooth_l1_loss</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">SoftMarginLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    A loss class for two-class classification problems.</span>

<span class="sd">    SoftMarginLoss creates a criterion that optimizes a two-class classification</span>
<span class="sd">    logistic loss between input tensor :math:`x` and labels tensor :math:`y`</span>
<span class="sd">    (containing 1 or -1).</span>

<span class="sd">    .. math::</span>
<span class="sd">        \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}</span>

<span class="sd">    Args:</span>
<span class="sd">        reduction (str): Apply specific reduction method to the output: &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;. Default: &quot;mean&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Predict data. Data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - Ground truth data, with the same type and shape as `logits`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor or Scalar, if `reduction` is &quot;none&quot;, its shape is the same as `logits`.</span>
<span class="sd">        Otherwise, a scalar value will be returned.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `logits` or `labels` is not a Tensor.</span>
<span class="sd">        TypeError: If dtype of `logits` or `labels` is neither float16 nor float32.</span>
<span class="sd">        ValueError: If shape of `logits` is not the same as `labels`.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.SoftMarginLoss()</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[0.3, 0.7], [0.5, 0.5]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[-1, 1], [1, -1]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.6764238</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">SoftMarginLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">soft_margin_loss</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">SoftMarginLoss</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">soft_margin_loss</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes softmax cross entropy between logits and labels.</span>

<span class="sd">    Measures the distribution error between the probabilities of the input (computed with softmax function) and the</span>
<span class="sd">    labels where the classes are mutually exclusive (only one class is positive) using cross entropy loss.</span>

<span class="sd">    Typical input into this function is unnormalized scores denoted as x whose shape is (N, C),</span>
<span class="sd">    and the corresponding targets.</span>

<span class="sd">    For each instance :math:`x_i`, i ranges from 0 to N-1, the loss is given as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x_i, c) = - \log\left(\frac{\exp(x_i[c])}{\sum_j \exp(x_i[j])}\right)</span>
<span class="sd">        =  -x_i[c] + \log\left(\sum_j \exp(x_i[j])\right)</span>

<span class="sd">    where :math:`x_i` is a 1D score Tensor, :math:`c` is the index of 1 in one-hot.</span>

<span class="sd">    Note:</span>
<span class="sd">        While the labels classes are mutually exclusive, i.e., only one class is positive in the labels, the predicted</span>
<span class="sd">        probabilities do not need to be exclusive. It is only required that the predicted probability distribution</span>
<span class="sd">        of entry is a valid one.</span>

<span class="sd">    Args:</span>
<span class="sd">        sparse (bool): Specifies whether labels use sparse format or not. Default: False.</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">            If &quot;none&quot;, do not perform reduction. Default: &quot;none&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape (N, C). Data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape (N, ). If `sparse` is True, the type of</span>
<span class="sd">          `labels` is int32 or int64. Otherwise, the type of `labels` is the same as the type of `logits`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a tensor of the same shape and type as logits with the component-wise logistic losses.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `sparse` is not a bool.</span>
<span class="sd">        TypeError: If `sparse` is True and dtype of `labels` is neither int32 nor int64.</span>
<span class="sd">        TypeError: If `sparse` is False and dtype of `labels` is neither float16 nor float32.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # case 1: sparse=True</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[3, 5, 6, 9, 12, 33, 42, 12, 32, 72]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels_np = np.array([1]).astype(np.int32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(labels_np)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [67.]</span>
<span class="sd">        &gt;&gt;&gt; # case 2: sparse=False</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[3, 5, 6, 9, 12, 33, 42, 12, 32, 72]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels_np = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]).astype(np.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(labels_np)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [30.]</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">sparse</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;none&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SoftmaxCrossEntropyWithLogits.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sparse</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">sparse</span><span class="p">,</span> <span class="s2">&quot;sparse&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduction</span> <span class="o">=</span> <span class="n">reduction</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">softmax_cross_entropy</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">one_hot</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">OneHot</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">on_value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="mf">1.0</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">off_value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="mf">0.</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">is_cpugpu</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;CPU&quot;</span><span class="p">,</span> <span class="s2">&quot;GPU&quot;</span><span class="p">]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sparse_softmax_cross_entropy</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">SparseSoftmaxCrossEntropyWithLogits</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">sparse</span><span class="p">:</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduction</span> <span class="o">==</span> <span class="s1">&#39;mean&#39;</span><span class="p">:</span>
                <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">sparse_softmax_cross_entropy</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>
                <span class="k">return</span> <span class="n">x</span>
            <span class="n">labels</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">one_hot</span><span class="p">(</span><span class="n">labels</span><span class="p">,</span> <span class="n">F</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">logits</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">on_value</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">off_value</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">softmax_cross_entropy</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_loss</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_label_dtype</span><span class="p">(</span><span class="n">labels_dtype</span><span class="p">,</span> <span class="n">cls_name</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function, used to check whether the data type of labels meets the requirements.&quot;&quot;&quot;</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_type_name</span><span class="p">(</span><span class="s2">&quot;labels&quot;</span><span class="p">,</span> <span class="n">labels_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="n">cls_name</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">DiceLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    The Dice coefficient is a set similarity loss. It is used to calculate the similarity between two samples. The</span>
<span class="sd">    value of the Dice coefficient is 1 when the segmentation result is the best and is 0 when the segmentation result</span>
<span class="sd">    is the worst. The Dice coefficient indicates the ratio of the area between two objects to the total area.</span>
<span class="sd">    The function is shown as follows:</span>

<span class="sd">    .. math::</span>
<span class="sd">        dice = 1 - \frac{2 * (pred \bigcap true)}{pred \bigcup true}</span>

<span class="sd">    Args:</span>
<span class="sd">        smooth (float): A term added to the denominator to improve numerical stability. Should be greater than 0.</span>
<span class="sd">                        Default: 1e-5.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number of</span>
<span class="sd">          additional dimensions. The data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, *)`, same shape as the `logits`.</span>
<span class="sd">          The data type must be float16 or float32.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a tensor with the per-example sampled Dice losses.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If the dimension of `logits` is different from `labels`.</span>
<span class="sd">        TypeError: If the type of `logits` or `labels` is not a tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.DiceLoss(smooth=1e-5)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[0, 1], [1, 0], [0, 1]]), mstype.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.38596618</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">smooth</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize DiceLoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">DiceLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">smooth</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_float</span><span class="p">(</span><span class="n">smooth</span><span class="p">,</span> <span class="s2">&quot;smooth&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Reshape</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">label</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">label</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_shape</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">label</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">intersection</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">mul</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">),</span> <span class="n">label</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)))</span>
        <span class="n">unionset</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">mul</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">),</span> <span class="n">logits</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)))</span> <span class="o">+</span> \
                   <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">mul</span><span class="p">(</span><span class="n">label</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">),</span> <span class="n">label</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)))</span>

        <span class="n">single_dice_coeff</span> <span class="o">=</span> <span class="p">(</span><span class="mi">2</span> <span class="o">*</span> <span class="n">intersection</span><span class="p">)</span> <span class="o">/</span> <span class="p">(</span><span class="n">unionset</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">smooth</span><span class="p">)</span>
        <span class="n">dice_loss</span> <span class="o">=</span> <span class="mi">1</span> <span class="o">-</span> <span class="n">single_dice_coeff</span>

        <span class="k">return</span> <span class="n">dice_loss</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_shape</span><span class="p">(</span><span class="n">logits_shape</span><span class="p">,</span> <span class="n">label_shape</span><span class="p">,</span> <span class="n">prim_name</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function, used to check whether the shape of logits and labels meets the requirements.&quot;&quot;&quot;</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;logits_shape&#39;</span><span class="p">,</span> <span class="n">logits_shape</span><span class="p">,</span> <span class="s1">&#39;label_shape&#39;</span><span class="p">,</span> <span class="n">label_shape</span><span class="p">,</span> <span class="n">prim_name</span><span class="o">=</span><span class="n">prim_name</span><span class="p">)</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_ndim_multi</span><span class="p">(</span><span class="n">logits_dim</span><span class="p">,</span> <span class="n">label_dim</span><span class="p">,</span> <span class="n">prim_name</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function, used to check whether the dimension of logits and label meets the requirements.&quot;&quot;&quot;</span>
    <span class="n">msg_prefix</span> <span class="o">=</span> <span class="sa">f</span><span class="s1">&#39;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s1">, the&#39;</span> <span class="k">if</span> <span class="n">prim_name</span> <span class="k">else</span> <span class="s2">&quot;The&quot;</span>
    <span class="k">if</span> <span class="n">logits_dim</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> &#39;logits&#39; dimension should be greater than 1, but got </span><span class="si">{</span><span class="n">logits_dim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">label_dim</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> &#39;labels&#39; dimension should be greater than 1, but got </span><span class="si">{</span><span class="n">label_dim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_weights</span><span class="p">(</span><span class="n">weight_shape</span><span class="p">,</span> <span class="n">label_shape</span><span class="p">,</span> <span class="n">prim_name</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function, used to check whether the reduced shape meets the requirements.&quot;&quot;&quot;</span>
    <span class="n">msg_prefix</span> <span class="o">=</span> <span class="sa">f</span><span class="s1">&#39;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s1">, the&#39;</span> <span class="k">if</span> <span class="n">prim_name</span> <span class="k">else</span> <span class="s2">&quot;The&quot;</span>
    <span class="k">if</span> <span class="n">weight_shape</span> <span class="o">!=</span> <span class="n">label_shape</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> weight_shape[0] should be equal to label_shape[1], &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;but got weight_shape[0]: </span><span class="si">{</span><span class="n">weight_shape</span><span class="si">}</span><span class="s2"> and label_shape[1]: </span><span class="si">{</span><span class="n">label_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>


<div class="viewcode-block" id="MultiClassDiceLoss"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.MultiClassDiceLoss.html#mindspore.nn.MultiClassDiceLoss">[docs]</a><span class="k">class</span> <span class="nc">MultiClassDiceLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    When there are multiple classifications, label is transformed into multiple binary classifications by one hot.</span>
<span class="sd">    For each channel section in the channel, it can be regarded as a binary classification problem, so it can be</span>
<span class="sd">    obtained through the binary loss of each category, and then the average value.</span>

<span class="sd">    Args:</span>
<span class="sd">        weights (Union[Tensor, None]): Tensor of shape :math:`(num\_classes, dim)`. The weight shape[0] should be</span>
<span class="sd">            equal to labels shape[1].</span>
<span class="sd">            Default: None.</span>
<span class="sd">        ignore_indiex (Union[int, None]): Class index to ignore.</span>
<span class="sd">            Default: None.</span>
<span class="sd">        activation (Union[str, Cell]): Activation function applied to the output of the fully connected layer,</span>
<span class="sd">            e.g. &#39;ReLU&#39;. Default: &#39;softmax&#39;. Choose from: [&#39;softmax&#39;, &#39;logsoftmax&#39;, &#39;relu&#39;, &#39;relu6&#39;, &#39;tanh&#39;, &#39;sigmoid&#39;]</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, C, *)` where :math:`*` means, any number of additional</span>
<span class="sd">          dimensions. The logits dimension should be greater than 1. The data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, C, *)`, same shape as the `logits`.</span>
<span class="sd">          The labels dimension should be greater than 1. The data type must be float16 or float32.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a tensor with the per-example sampled MultiClass Dice losses.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If the shape of `logits` is different from `labels`.</span>
<span class="sd">        TypeError: If the type of `logits` or `labels` is not a tensor.</span>
<span class="sd">        ValueError: If the dimension of `logits` or `labels` is less than 2.</span>
<span class="sd">        ValueError: If the weights.shape[0] is not equal to labels.shape[1].</span>
<span class="sd">        ValueError: If `weights` is a tensor, but its dimension is not 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.MultiClassDiceLoss(weights=None, ignore_indiex=None, activation=&quot;softmax&quot;)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[0.2, 0.5, 0.7], [0.3, 0.1, 0.5], [0.9, 0.6, 0.3]]), mstype.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]), mstype.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.54958105</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">weights</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">ignore_indiex</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">activation</span><span class="o">=</span><span class="s2">&quot;softmax&quot;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize MultiClassDiceLoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MultiClassDiceLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="n">activation_list</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;softmax&#39;</span><span class="p">,</span> <span class="s1">&#39;logsoftmax&#39;</span><span class="p">,</span> <span class="s1">&#39;relu&#39;</span><span class="p">,</span> <span class="s1">&#39;relu6&#39;</span><span class="p">,</span> <span class="s1">&#39;tanh&#39;</span><span class="p">,</span> <span class="s1">&#39;sigmoid&#39;</span><span class="p">]</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">binarydiceloss</span> <span class="o">=</span> <span class="n">DiceLoss</span><span class="p">(</span><span class="n">smooth</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weights</span> <span class="o">=</span> <span class="n">weights</span> <span class="k">if</span> <span class="n">weights</span> <span class="ow">is</span> <span class="kc">None</span> <span class="k">else</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;weights&quot;</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="p">[</span><span class="n">Tensor</span><span class="p">])</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">weights</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">weights</span><span class="o">.</span><span class="n">ndim</span> <span class="o">!=</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;weights&#39; should be 2, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">weights</span><span class="o">.</span><span class="n">ndim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">ignore_indiex</span> <span class="o">=</span> <span class="n">ignore_indiex</span> <span class="k">if</span> <span class="n">ignore_indiex</span> <span class="ow">is</span> <span class="kc">None</span> <span class="k">else</span> \
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;ignore_indiex&quot;</span><span class="p">,</span> <span class="n">ignore_indiex</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">])</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">activation</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="ow">and</span> <span class="n">activation</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">activation_list</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;activation&#39; must be in </span><span class="si">{</span><span class="n">activation_list</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">activation</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">activation</span> <span class="o">=</span> <span class="n">get_activation</span><span class="p">(</span><span class="n">activation</span><span class="p">)</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">activation</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="k">else</span> <span class="n">activation</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">activation</span><span class="p">,</span> <span class="n">Cell</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;activation&#39; must be str or Cell, &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">activation</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Reshape</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">label</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">label</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_shape</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">label</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_ndim_multi</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">ndim</span><span class="p">,</span> <span class="n">label</span><span class="o">.</span><span class="n">ndim</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">total_loss</span> <span class="o">=</span> <span class="mi">0</span>

        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation</span><span class="p">(</span><span class="n">logits</span><span class="p">)</span>

        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">label</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]):</span>
            <span class="k">if</span> <span class="n">i</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">ignore_indiex</span><span class="p">:</span>
                <span class="n">dice_loss</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">binarydiceloss</span><span class="p">(</span><span class="n">logits</span><span class="p">[:,</span> <span class="n">i</span><span class="p">],</span> <span class="n">label</span><span class="p">[:,</span> <span class="n">i</span><span class="p">])</span>
                <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">weights</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
                    <span class="n">_check_weights</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">weights</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">label</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
                    <span class="n">dice_loss</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">weights</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                <span class="n">total_loss</span> <span class="o">+=</span> <span class="n">dice_loss</span>

        <span class="k">return</span> <span class="n">total_loss</span><span class="o">/</span><span class="n">label</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span></div>


<span class="k">class</span> <span class="nc">SampledSoftmaxLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the sampled softmax training loss. This operator can accelerate the training of the softmax classifier</span>
<span class="sd">    over a large number of classes. It is generally an underestimate of the full softmax loss.</span>

<span class="sd">    Args:</span>
<span class="sd">        num_sampled (int): The number of classes to randomly sample per batch.</span>
<span class="sd">        num_classes (int): The number of possible classes.</span>
<span class="sd">        num_true (int): The number of labels classes per training example. Default: 1.</span>
<span class="sd">        sampled_values (Union[list, tuple]):  List or tuple of (`sampled_candidates`, `true_expected_count`,</span>
<span class="sd">            `sampled_expected_count`) returned by a `*CandidateSampler` function.</span>
<span class="sd">            Default: None, `UniformCandidateSampler` is applied.</span>
<span class="sd">        remove_accidental_hits (bool): Whether to remove &quot;accidental hits&quot;</span>
<span class="sd">            where a sampled class equals to one of the labels classes. Default: True.</span>
<span class="sd">        seed (int): Random seed for candidate sampling. Default: 0.</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">            If &quot;none&quot;, do not perform reduction. Default: &quot;none&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **weights** (Tensor) - Tensor of shape :math:`(C, dim)`.</span>
<span class="sd">        - **bias** (Tensor) - Tensor of shape :math:`(C,)`. The class biases.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape :math:`(N, num\_true)`, type `int64, int32`. The labels classes.</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape :math:`(N, dim)`. The forward activations of the input network.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor or Scalar, if `reduction` is &#39;none&#39;, then output is a tensor with shape :math:`(N,)`.</span>
<span class="sd">        Otherwise, the output is a scalar.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `sampled_values` is not a list or tuple.</span>
<span class="sd">        TypeError: If dtype of `labels` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>
<span class="sd">        ValueError: If `num_sampled` or `num_true` is greater than `num_classes`.</span>
<span class="sd">        ValueError: If length of `sampled_values` is not equal to 3.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; mindspore.set_seed(1)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.SampledSoftmaxLoss(num_sampled=4, num_classes=7, num_true=1)</span>
<span class="sd">        &gt;&gt;&gt; weights = Tensor(np.random.randint(0, 9, [7, 10]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; biases = Tensor(np.random.randint(0, 9, [7]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor([0, 1, 2])</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(weights, biases, labels, logits)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [4.6051701e+01 1.4000047e+01 6.1989022e-06]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">num_sampled</span><span class="p">,</span> <span class="n">num_classes</span><span class="p">,</span> <span class="n">num_true</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">sampled_values</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">remove_accidental_hits</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">seed</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;none&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SampledSoftmaxLoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">SampledSoftmaxLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">num_true</span> <span class="o">&lt;</span> <span class="mi">1</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;num_true&#39; must be greater than or equal to 1, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">num_true</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">seed</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;seed&#39; must be greater than or equal to 0, but got </span><span class="si">{</span><span class="n">seed</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">num_sampled</span> <span class="o">&gt;</span> <span class="n">num_classes</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;num_sampled&#39; must be smaller than or &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;equal to &#39;num_classes&#39;, but got &#39;num_sampled&#39;: </span><span class="si">{</span><span class="n">num_sampled</span><span class="si">}</span><span class="s2"> &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;and &#39;num_classes&#39;: </span><span class="si">{</span><span class="n">num_classes</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">num_true</span> <span class="o">&gt;</span> <span class="n">num_classes</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;num_true&#39; must be smaller than or equal to &#39;num_classes&#39;, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got &#39;num_true&#39;: </span><span class="si">{</span><span class="n">num_true</span><span class="si">}</span><span class="s2"> amd &#39;num_classes&#39;: </span><span class="si">{</span><span class="n">num_classes</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">sampled_values</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">sampled_values</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
                <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;sampled_values&#39; must be a list or tuple, &quot;</span>
                                <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">sampled_values</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">sampled_values</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">3</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the length of &#39;sampled_values&#39; must be equal to 3,&quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">sampled_values</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">num_sampled</span> <span class="o">=</span> <span class="n">num_sampled</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">num_classes</span> <span class="o">=</span> <span class="n">num_classes</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">num_true</span> <span class="o">=</span> <span class="n">num_true</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sampled_values</span> <span class="o">=</span> <span class="n">sampled_values</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">remove_accidental_hits</span> <span class="o">=</span> <span class="n">remove_accidental_hits</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">seed</span> <span class="o">=</span> <span class="n">seed</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sampler</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">UniformCandidateSampler</span><span class="p">(</span>
            <span class="n">num_true</span><span class="p">,</span>
            <span class="n">num_sampled</span><span class="p">,</span>
            <span class="kc">True</span><span class="p">,</span>
            <span class="n">num_classes</span><span class="p">,</span>
            <span class="n">seed</span><span class="p">,</span>
            <span class="n">remove_accidental_hits</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cast</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Cast</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Reshape</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">shape</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Shape</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">exp</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Exp</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">log</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Log</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">slice_op</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Slice</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">MatMul</span><span class="p">(</span><span class="kc">False</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">gather_v2</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Gather</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduce_max_true</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ReduceMax</span><span class="p">(</span><span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ReduceSum</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum_true</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ReduceSum</span><span class="p">(</span><span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">concat_dim0</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Concat</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">concat_dim1</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Concat</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">ones_like</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">OnesLike</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">zeros_like</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ZerosLike</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mul</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Mul</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">expand_dims</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ExpandDims</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dtype</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">DType</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">biases</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="n">logits</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;weights&#39;</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;biases&#39;</span><span class="p">,</span> <span class="n">biases</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_label_dtype</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dtype</span><span class="p">(</span><span class="n">labels</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>

        <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_compute_sampled_logits</span><span class="p">(</span>
            <span class="n">weights</span><span class="o">=</span><span class="n">weights</span><span class="p">,</span>
            <span class="n">biases</span><span class="o">=</span><span class="n">biases</span><span class="p">,</span>
            <span class="n">labels</span><span class="o">=</span><span class="n">labels</span><span class="p">,</span>
            <span class="n">logits</span><span class="o">=</span><span class="n">logits</span><span class="p">,</span>
            <span class="n">num_true</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">num_true</span><span class="p">,</span>
            <span class="n">sampled_values</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">sampled_values</span><span class="p">,</span>
            <span class="n">subtract_log_q</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>

        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_softmax_cross_entropy</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="k">def</span> <span class="nf">_softmax_cross_entropy</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">targets</span><span class="p">):</span>
        <span class="n">stable_exp_logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">exp</span><span class="p">(</span><span class="n">logits</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_max_true</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
        <span class="n">pred</span> <span class="o">=</span> <span class="n">stable_exp_logits</span> <span class="o">/</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum_true</span><span class="p">(</span><span class="n">stable_exp_logits</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="k">return</span> <span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">targets</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">log</span><span class="p">(</span><span class="n">pred</span> <span class="o">+</span> <span class="mf">1.0e-20</span><span class="p">),</span> <span class="mi">1</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">_compute_sampled_logits</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span>
                                <span class="n">biases</span><span class="p">,</span>
                                <span class="n">labels</span><span class="p">,</span>
                                <span class="n">logits</span><span class="p">,</span>
                                <span class="n">num_true</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                                <span class="n">sampled_values</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                                <span class="n">subtract_log_q</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Helper function for SampledSoftmaxLoss functions.</span>

<span class="sd">        Computes sampled output training logits and labels suitable</span>

<span class="sd">        Note: In the case where num_true &gt; 1, we assign to each labels class</span>
<span class="sd">        with the labels probability (1/num_true) so that the labels probabilities</span>
<span class="sd">        sum to 1 per-example.</span>

<span class="sd">        Args:</span>
<span class="sd">            weights (Tensor): Tensor of shape `[num_classes, dim]`.</span>
<span class="sd">            biases (Tensor): Tensor of shape `[num_classes]`.</span>
<span class="sd">            labels (Tensor): Tensor of shape `[batch_size, num_true]`. The labels classes.</span>
<span class="sd">            logits (Tensor): Tensor of shape `[batch_size, dim]`. The forward</span>
<span class="sd">                activations of the input network.</span>
<span class="sd">            num_true (int): The number of labels classes per training example.</span>
<span class="sd">            sampled_values: A tuple of (`sampled_candidates`, `true_expected_count`,</span>
<span class="sd">                `sampled_expected_count`) returned by a `UniformCandidateSampler` function.</span>
<span class="sd">            subtract_log_q: A `bool`. whether to subtract the log expected count of</span>
<span class="sd">                the labels in the sample to get the logits of the true labels. Default: True.</span>
<span class="sd">        Returns:</span>
<span class="sd">            out_logits: `Tensor` object with shape</span>
<span class="sd">                `[batch_size, num_true + num_sampled]`</span>
<span class="sd">            out_labels: A tensor object with the same shape as `out_logits`.</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="n">labels</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">:</span>
            <span class="n">labels</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">labels</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="n">labels</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">labels</span><span class="p">,</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">num_true</span><span class="p">))</span>
        <span class="n">labels_flat</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">labels</span><span class="p">,</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,))</span>

        <span class="c1"># Sample the negative labels.</span>
        <span class="c1">#   sampled shape: [num_sampled] tensor</span>
        <span class="c1">#   true_expected_count shape is [batch_size, 1] tensor</span>
        <span class="c1">#   sampled_expected_count shape is [num_sampled] tensor</span>
        <span class="k">if</span> <span class="n">sampled_values</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">sampled_values</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">sampler</span><span class="p">(</span><span class="n">labels</span><span class="p">)</span>

        <span class="p">(</span><span class="n">sampled</span><span class="p">,</span> <span class="n">true_expected_count</span><span class="p">,</span> <span class="n">sampled_expected_count</span><span class="p">)</span> <span class="o">=</span> <span class="n">sampled_values</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="n">sampled</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">:</span>
            <span class="n">sampled</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">sampled</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="n">all_ids</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">concat_dim0</span><span class="p">((</span><span class="n">labels_flat</span><span class="p">,</span> <span class="n">sampled</span><span class="p">))</span>
        <span class="n">all_w</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">gather_v2</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">all_ids</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>

        <span class="n">n_true</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">labels_flat</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
        <span class="n">n_sampled</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">sampled</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>
        <span class="n">n_dim</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">all_w</span><span class="p">)[</span><span class="mi">1</span><span class="p">]</span>

        <span class="n">true_w</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">slice_op</span><span class="p">(</span><span class="n">all_w</span><span class="p">,</span> <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="n">n_true</span><span class="p">,</span> <span class="n">n_dim</span><span class="p">])</span>
        <span class="n">sampled_w</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">slice_op</span><span class="p">(</span><span class="n">all_w</span><span class="p">,</span> <span class="p">[</span><span class="n">n_true</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="n">n_sampled</span><span class="p">,</span> <span class="n">n_dim</span><span class="p">])</span>
        <span class="n">sampled_logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">sampled_w</span><span class="p">)</span>

        <span class="n">all_b</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">gather_v2</span><span class="p">(</span><span class="n">biases</span><span class="p">,</span> <span class="n">all_ids</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
        <span class="n">true_b</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">slice_op</span><span class="p">(</span><span class="n">all_b</span><span class="p">,</span> <span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="n">n_true</span><span class="p">])</span>
        <span class="n">sampled_b</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">slice_op</span><span class="p">(</span><span class="n">all_b</span><span class="p">,</span> <span class="p">[</span><span class="n">n_true</span><span class="p">],</span> <span class="p">[</span><span class="n">n_sampled</span><span class="p">])</span>

        <span class="n">new_true_w_shape</span> <span class="o">=</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">num_true</span><span class="p">,</span> <span class="n">n_dim</span><span class="p">)</span>
        <span class="n">row_wise_dots</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">mul</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">expand_dims</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">true_w</span><span class="p">,</span> <span class="n">new_true_w_shape</span><span class="p">))</span>

        <span class="c1"># We want the row-wise dot plus biases which yields a</span>
        <span class="c1"># [batch_size, num_true] tensor of true_logits.</span>
        <span class="n">dots_as_matrix</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">row_wise_dots</span><span class="p">,</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">n_dim</span><span class="p">))</span>
        <span class="n">true_logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">dots_as_matrix</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">num_true</span><span class="p">))</span>
        <span class="n">true_b</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">true_b</span><span class="p">,</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">num_true</span><span class="p">))</span>
        <span class="n">true_logits</span> <span class="o">+=</span> <span class="n">true_b</span>
        <span class="n">sampled_logits</span> <span class="o">+=</span> <span class="n">sampled_b</span>

        <span class="k">if</span> <span class="n">subtract_log_q</span><span class="p">:</span>
            <span class="c1"># Subtract log of Q(l), prior probability that l appears in sampled.</span>
            <span class="n">true_logits</span> <span class="o">-=</span> <span class="bp">self</span><span class="o">.</span><span class="n">log</span><span class="p">(</span><span class="n">true_expected_count</span><span class="p">)</span>
            <span class="n">sampled_logits</span> <span class="o">-=</span> <span class="bp">self</span><span class="o">.</span><span class="n">log</span><span class="p">(</span><span class="n">sampled_expected_count</span><span class="p">)</span>

        <span class="c1"># Construct output logits and labels. The true labels/logits start at col 0.</span>
        <span class="n">out_logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">concat_dim1</span><span class="p">((</span><span class="n">true_logits</span><span class="p">,</span> <span class="n">sampled_logits</span><span class="p">))</span>

        <span class="c1"># true_logits is a float tensor, ones_like(true_logits) is a float</span>
        <span class="c1"># tensor of ones. We then divide by num_true to ensure the per-example</span>
        <span class="c1"># labels sum to 1.0, i.e. form a proper probability distribution.</span>
        <span class="n">out_labels</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">concat_dim1</span><span class="p">((</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">true_logits</span><span class="p">)</span> <span class="o">/</span> <span class="n">num_true</span><span class="p">,</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">zeros_like</span><span class="p">(</span><span class="n">sampled_logits</span><span class="p">)</span>
        <span class="p">))</span>
        <span class="k">return</span> <span class="n">out_logits</span><span class="p">,</span> <span class="n">out_labels</span>


<span class="k">class</span> <span class="nc">BCELoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    BCELoss creates a criterion to measure the binary cross entropy between the true labels and predicted labels.</span>

<span class="sd">    Set the predicted labels as :math:`x`, true labels as :math:`y`, the output loss as :math:`\ell(x, y)`.</span>
<span class="sd">    Let,</span>

<span class="sd">    .. math::</span>
<span class="sd">        L = \{l_1,\dots,l_N\}^\top, \quad</span>
<span class="sd">        l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]</span>

<span class="sd">    where N is the batch size. Then,</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) = \begin{cases}</span>
<span class="sd">        L, &amp; \text{if reduction} = \text{&#39;none&#39;;}\\</span>
<span class="sd">        \operatorname{mean}(L), &amp; \text{if reduction} = \text{&#39;mean&#39;;}\\</span>
<span class="sd">        \operatorname{sum}(L),  &amp; \text{if reduction} = \text{&#39;sum&#39;.}</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Note:</span>
<span class="sd">        Note that the predicted labels should always be the output of sigmoid. Because it is a two-class</span>
<span class="sd">        classification, the true labels should be numbers between 0 and 1.</span>
<span class="sd">        And if input is either 0 or 1, one of the log terms would be mathematically undefined in the above loss</span>
<span class="sd">        equation.</span>

<span class="sd">    Args:</span>
<span class="sd">        weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.</span>
<span class="sd">            And it must have the same shape and data type as `inputs`. Default: None</span>
<span class="sd">        reduction (str): Specifies the reduction to be applied to the output.</span>
<span class="sd">            Its value must be one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;. Default: &#39;none&#39;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - The input tensor with shape :math:`(N, *)` where :math:`*` means, any number</span>
<span class="sd">          of additional dimensions. The data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - The label tensor with shape :math:`(N, *)`, the same shape and data type as `logits`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor or Scalar, if `reduction` is &#39;none&#39;, then output is a tensor and has the same shape as `logits`.</span>
<span class="sd">        Otherwise, the output is a scalar.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `logits`, `labels` or `weight` (if given) is neither float16 nor float32.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>
<span class="sd">        ValueError: If shape of `logits` is not the same as `labels` or `weight` (if given).</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; weight = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 3.3, 2.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.BCELoss(weight=weight, reduction=&#39;mean&#39;)</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[0, 1, 0], [0, 0, 1]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        1.8952923</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">weight</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;none&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize BCELoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">BCELoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">binary_cross_entropy</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BinaryCrossEntropy</span><span class="p">(</span><span class="n">reduction</span><span class="o">=</span><span class="n">reduction</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight_one</span> <span class="o">=</span> <span class="n">weight</span> <span class="ow">is</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight_one</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">weight</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">ones</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">OnesLike</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight_one</span><span class="p">:</span>
            <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="n">logits</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span>
        <span class="n">loss</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">binary_cross_entropy</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">loss</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_reduced_shape_valid</span><span class="p">(</span><span class="n">ori_shape</span><span class="p">,</span> <span class="n">reduced_shape</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">cls_name</span><span class="p">,</span> <span class="n">arg_name1</span><span class="p">,</span> <span class="n">arg_name2</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function, used to check whether the reduced shape meets the requirements.&quot;&quot;&quot;</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_reduce_shape</span><span class="p">(</span><span class="n">ori_shape</span><span class="p">,</span> <span class="n">reduced_shape</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">cls_name</span><span class="p">,</span> <span class="n">arg_name1</span><span class="p">,</span> <span class="n">arg_name2</span><span class="p">)</span>


<div class="viewcode-block" id="CosineEmbeddingLoss"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.CosineEmbeddingLoss.html#mindspore.nn.CosineEmbeddingLoss">[docs]</a><span class="k">class</span> <span class="nc">CosineEmbeddingLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    CosineEmbeddingLoss creates a criterion to measure the similarity between two tensors using cosine distance.</span>

<span class="sd">    Given two tensors :math:`x1`, :math:`x2`, and a Tensor label :math:`y` with values 1 or -1:</span>

<span class="sd">    .. math::</span>
<span class="sd">        loss(x_1, x_2, y) = \begin{cases}</span>
<span class="sd">        1-cos(x_1, x_2), &amp; \text{if } y = 1\\</span>
<span class="sd">        max(0, cos(x_1, x_2)-margin), &amp; \text{if } y = -1\\</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        margin (float): Should be in [-1.0, 1.0]. Default 0.0.</span>
<span class="sd">        reduction (str): Specifies which reduction to be applied to the output. It must be one of</span>
<span class="sd">          &quot;none&quot;, &quot;mean&quot;, and &quot;sum&quot;, meaning no reduction, reduce mean and sum on output, respectively. Default &quot;mean&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits_x1** (Tensor) - Tensor of shape :math:`(N, *)` where :math:`*` means, any number</span>
<span class="sd">          of additional dimensions.</span>
<span class="sd">        - **logits_x2** (Tensor) - Tensor of shape :math:`(N, *)`, same shape and dtype as `logits_x1`.</span>
<span class="sd">        - **labels** (Tensor) - Contains value 1 or -1. Suppose the shape of `logits_x1` is</span>
<span class="sd">          :math:`(x_1, x_2, x_3, ..., x_R)`, then the shape of `labels` must be :math:`(x_1, x_3, x_4, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor or Scalar, if `reduction` is &quot;none&quot;, its shape is the same as `labels`.</span>
<span class="sd">        Otherwise, a scalar value will be returned.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `margin` is not a float.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>
<span class="sd">        ValueError: If `margin` is not in range [-1, 1].</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; logits_x1 = Tensor(np.array([[0.3, 0.8], [0.4, 0.3]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; logits_x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([1, -1]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; cosine_embedding_loss = nn.CosineEmbeddingLoss()</span>
<span class="sd">        &gt;&gt;&gt; output = cosine_embedding_loss(logits_x1, logits_x2, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.0003425479</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">margin</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s2">&quot;mean&quot;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize CosineEmbeddingLoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">CosineEmbeddingLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ReduceSum</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">maximum</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Maximum</span><span class="p">()</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;margin&quot;</span><span class="p">,</span> <span class="n">margin</span><span class="p">,</span> <span class="p">[</span><span class="nb">float</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">margin</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_float_range</span><span class="p">(</span><span class="n">margin</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_BOTH</span><span class="p">,</span> <span class="s2">&quot;margin&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits_x1</span><span class="p">,</span> <span class="n">logits_x2</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits_x1&#39;</span><span class="p">,</span> <span class="n">logits_x1</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits_x2&#39;</span><span class="p">,</span> <span class="n">logits_x2</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">F</span><span class="o">.</span><span class="n">same_type_shape</span><span class="p">(</span><span class="n">logits_x1</span><span class="p">,</span> <span class="n">logits_x2</span><span class="p">)</span>
        <span class="n">_check_reduced_shape_valid</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">logits_x1</span><span class="p">),</span> <span class="n">F</span><span class="o">.</span><span class="n">shape</span><span class="p">(</span><span class="n">labels</span><span class="p">),</span> <span class="p">(</span><span class="mi">1</span><span class="p">,),</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">,</span> <span class="s2">&quot;logits_x1&quot;</span><span class="p">,</span> <span class="s2">&quot;labels&quot;</span><span class="p">)</span>
        <span class="c1"># if labels &gt; 0, 1-cosine(logits_x1, logits_x2)</span>
        <span class="c1"># else, max(0, cosine(logits_x1, logits_x2)-margin)</span>
        <span class="n">prod_sum</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">logits_x1</span> <span class="o">*</span> <span class="n">logits_x2</span><span class="p">,</span> <span class="p">(</span><span class="mi">1</span><span class="p">,))</span>
        <span class="n">square1</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">square</span><span class="p">(</span><span class="n">logits_x1</span><span class="p">),</span> <span class="p">(</span><span class="mi">1</span><span class="p">,))</span>
        <span class="n">square2</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reduce_sum</span><span class="p">(</span><span class="n">F</span><span class="o">.</span><span class="n">square</span><span class="p">(</span><span class="n">logits_x2</span><span class="p">),</span> <span class="p">(</span><span class="mi">1</span><span class="p">,))</span>
        <span class="n">denom</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="n">square1</span><span class="p">)</span> <span class="o">*</span> <span class="n">F</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="n">square2</span><span class="p">)</span>
        <span class="n">cosine</span> <span class="o">=</span> <span class="n">prod_sum</span> <span class="o">/</span> <span class="n">denom</span>

        <span class="n">pos_value</span> <span class="o">=</span> <span class="mf">1.0</span> <span class="o">-</span> <span class="n">cosine</span>
        <span class="n">neg_value</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">maximum</span><span class="p">(</span><span class="n">cosine</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">margin</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">)</span>
        <span class="n">zeros</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">zeros_like</span><span class="p">(</span><span class="n">cosine</span><span class="p">)</span>
        <span class="n">pos_part</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">select</span><span class="p">(</span><span class="n">labels</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">pos_value</span><span class="p">,</span> <span class="n">zeros</span><span class="p">)</span>
        <span class="n">neg_part</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">select</span><span class="p">(</span><span class="n">labels</span> <span class="o">==</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">neg_value</span><span class="p">,</span> <span class="n">zeros</span><span class="p">)</span>
        <span class="n">output_unreduced</span> <span class="o">=</span> <span class="n">pos_part</span> <span class="o">+</span> <span class="n">neg_part</span>

        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_loss</span><span class="p">(</span><span class="n">output_unreduced</span><span class="p">)</span></div>


<span class="k">class</span> <span class="nc">BCEWithLogitsLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Adds sigmoid activation function to input logits, and uses the given logits to compute binary cross entropy</span>
<span class="sd">    between the logits and the labels.</span>

<span class="sd">    Sets input `logits` as :math:`X`, input `labels` as :math:`Y`, output as :math:`L`. Then,</span>

<span class="sd">    .. math::</span>
<span class="sd">        p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}}</span>

<span class="sd">    .. math::</span>
<span class="sd">        L_{ij} = -[Y_{ij} \cdot log(p_{ij}) + (1 - Y_{ij}) \cdot log(1 - p_{ij})]</span>

<span class="sd">    Then,</span>

<span class="sd">    .. math::</span>
<span class="sd">        \ell(x, y) = \begin{cases}</span>
<span class="sd">        L, &amp; \text{if reduction} = \text{&#39;none&#39;;}\\</span>
<span class="sd">        \operatorname{mean}(L), &amp; \text{if reduction} = \text{&#39;mean&#39;;}\\</span>
<span class="sd">        \operatorname{sum}(L),  &amp; \text{if reduction} = \text{&#39;sum&#39;.}</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    Args:</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &#39;mean&#39;, &#39;sum&#39;, and &#39;none&#39;.</span>
<span class="sd">            If &#39;none&#39;, do not perform reduction. Default:&#39;mean&#39;.</span>
<span class="sd">        weight (Tensor, optional): A rescaling weight applied to the loss of each batch element.</span>
<span class="sd">            If not None, it can be broadcast to a tensor with shape of `logits`,</span>
<span class="sd">            data type must be float16 or float32. Default: None.</span>
<span class="sd">        pos_weight (Tensor, optional): A weight of positive examples. Must be a vector with length equal to the</span>
<span class="sd">            number of classes. If not None, it must be broadcast to a tensor with shape of `logits`, data type</span>
<span class="sd">            must be float16 or float32. Default: None.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Input logits with shape :math:`(N, *)` where :math:`*` means, any number</span>
<span class="sd">          of additional dimensions. The data type must be float16 or float32.</span>
<span class="sd">        - **labels** (Tensor) - Ground truth label with shape :math:`(N, *)`, same shape and dtype as `logits`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor or Scalar, if `reduction` is &quot;none&quot;, its shape is the same as `logits`.</span>
<span class="sd">        Otherwise, a scalar value will be returned.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If data type of `logits` or `labels` is neither float16 nor float32.</span>
<span class="sd">        TypeError: If `weight` or `pos_weight` is a parameter.</span>
<span class="sd">        TypeError: If data type of `weight` or `pos_weight` is neither float16 nor float32.</span>
<span class="sd">        ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `logits`.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``  ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; loss = nn.BCEWithLogitsLoss()</span>
<span class="sd">        &gt;&gt;&gt; output = loss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.3463612</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">,</span> <span class="n">weight</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">pos_weight</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize BCEWithLogitsLoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">BCEWithLogitsLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bce_with_logits_loss</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BCEWithLogitsLoss</span><span class="p">(</span><span class="n">reduction</span><span class="o">=</span><span class="n">reduction</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;weight&#39; can not be a Parameter.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">pos_weight</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;pos_weight&#39; can not be a Parameter.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">weight</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pos_weight</span> <span class="o">=</span> <span class="n">pos_weight</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">ones</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">OnesLike</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="c1"># A ones tensor shaped like logits is the default for both weight and pos_weight.</span>
        <span class="n">ones_input</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">ones</span><span class="p">(</span><span class="n">logits</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">weight</span> <span class="o">=</span> <span class="n">ones_input</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">pos_weight</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">pos_weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">pos_weight</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">pos_weight</span> <span class="o">=</span> <span class="n">ones_input</span>
        <span class="c1"># The P.BCEWithLogitsLoss primitive applies the configured reduction itself.</span>
        <span class="n">loss</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bce_with_logits_loss</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">pos_weight</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">loss</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_ndim</span><span class="p">(</span><span class="n">logits_ndim</span><span class="p">,</span> <span class="n">labels_ndim</span><span class="p">,</span> <span class="n">prime_name</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sd">&#39;&#39;&#39;Internal function: check that logits and labels each have 2 to 4 dimensions and equal rank.&#39;&#39;&#39;</span>
    <span class="n">msg_prefix</span> <span class="o">=</span> <span class="sa">f</span><span class="s1">&#39;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="n">prime_name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s1">, the&#39;</span> <span class="k">if</span> <span class="n">prime_name</span> <span class="k">else</span> <span class="s2">&quot;The&quot;</span>
    <span class="k">if</span> <span class="n">logits_ndim</span> <span class="o">&lt;</span> <span class="mi">2</span> <span class="ow">or</span> <span class="n">logits_ndim</span> <span class="o">&gt;</span> <span class="mi">4</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> dimensions of &#39;logits&#39; should be in [2, 4], but got &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;dimension of &#39;logits&#39; </span><span class="si">{</span><span class="n">logits_ndim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">labels_ndim</span> <span class="o">&lt;</span> <span class="mi">2</span> <span class="ow">or</span> <span class="n">labels_ndim</span> <span class="o">&gt;</span> <span class="mi">4</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> dimensions of &#39;labels&#39; should be in [2, 4], but got &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;dimension of &#39;labels&#39; </span><span class="si">{</span><span class="n">labels_ndim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">logits_ndim</span> <span class="o">!=</span> <span class="n">labels_ndim</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> dimensions of &#39;logits&#39; and &#39;labels&#39; must be equal, but got &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;dimension of &#39;logits&#39; </span><span class="si">{</span><span class="n">logits_ndim</span><span class="si">}</span><span class="s2"> and dimension of &#39;labels&#39; </span><span class="si">{</span><span class="n">labels_ndim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_channel_and_shape</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="n">prime_name</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sd">&#39;&#39;&#39;Internal function: validate channel counts. Note: despite their names, &quot;logits&quot; and &quot;labels&quot; here are the channel sizes (shape[1]) of the tensors, not the tensors themselves.&#39;&#39;&#39;</span>
    <span class="n">msg_prefix</span> <span class="o">=</span> <span class="sa">f</span><span class="s1">&#39;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="n">prime_name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s1">, the&#39;</span> <span class="k">if</span> <span class="n">prime_name</span> <span class="k">else</span> <span class="s2">&quot;The&quot;</span>
    <span class="k">if</span> <span class="n">logits</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> &#39;logits&#39;.shape[1] cannot be one, but got </span><span class="si">{</span><span class="n">logits</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">labels</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">logits</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> &#39;labels&#39;.shape[1] must be one or equal to &#39;logits&#39;.shape[1]: </span><span class="si">{</span><span class="n">logits</span><span class="si">}</span><span class="s2">, &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">labels</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>


<span class="nd">@constexpr</span>
<span class="k">def</span> <span class="nf">_check_input_dtype</span><span class="p">(</span><span class="n">labels_dtype</span><span class="p">,</span> <span class="n">cls_name</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Internal function: check that labels dtype is one of int32, int64, float16 or float32.&quot;&quot;&quot;</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_type_name</span><span class="p">(</span><span class="s2">&quot;labels&quot;</span><span class="p">,</span> <span class="n">labels_dtype</span><span class="p">,</span>
                              <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">],</span> <span class="n">cls_name</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">FocalLoss</span><span class="p">(</span><span class="n">LossBase</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    The loss function proposed by Kaiming team in their paper ``Focal Loss for Dense Object Detection`` improves the</span>
<span class="sd">    effect of image object detection. It is a loss function to solve the imbalance of categories and the difference of</span>
<span class="sd">    classification difficulty. If you want to learn more, please refer to the paper.</span>
<span class="sd">    `Focal Loss for Dense Object Detection &lt;https://arxiv.org/pdf/1708.02002.pdf&gt;`_. The function is shown as follows:</span>

<span class="sd">    .. math::</span>
<span class="sd">        FL(p_t) = -(1-p_t)^\gamma log(p_t)</span>

<span class="sd">    Args:</span>
<span class="sd">        gamma (float): Gamma is used to adjust the steepness of weight curve in focal loss. Default: 2.0.</span>
<span class="sd">        weight (Union[Tensor, None]): A rescaling weight applied to the loss of each batch element. The dimension of</span>
<span class="sd">                                      weight should be 1. If None, no weight is applied. Default: None.</span>
<span class="sd">        reduction (str): Type of reduction to be applied to loss. The optional values are &quot;mean&quot;, &quot;sum&quot;, and &quot;none&quot;.</span>
<span class="sd">                         If &quot;none&quot;, do not perform reduction. Default: &quot;mean&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **logits** (Tensor) - Tensor of shape should be :math:`(B, C)` or :math:`(B, C, H)` or :math:`(B, C, H, W)`.</span>
<span class="sd">          Where :math:`C` is the number of classes. Its value is greater than 1. If the shape is :math:`(B, C, H, W)`</span>
<span class="sd">          or :math:`(B, C, H)`, the :math:`H` or product of :math:`H` and :math:`W` should be the same as labels.</span>
<span class="sd">        - **labels** (Tensor) - Tensor of shape should be :math:`(B, C)` or :math:`(B, C, H)` or :math:`(B, C, H, W)`.</span>
<span class="sd">          The value of :math:`C` is 1 or it needs to be the same as logits&#39; :math:`C`. If :math:`C` is not 1,</span>
<span class="sd">          the shape of labels should be the same as that of logits, where :math:`C` is the number of classes.</span>
<span class="sd">          If the shape is :math:`(B, C, H, W)` or :math:`(B, C, H)`, the :math:`H` or product of :math:`H`</span>
<span class="sd">          and :math:`W` should be the same as logits.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor or Scalar, if `reduction` is &quot;none&quot;, its shape is the same as `logits`.</span>
<span class="sd">        Otherwise, a scalar value will be returned.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If the data type of `gamma` is not a float.</span>
<span class="sd">        TypeError: If `weight` is not a Tensor.</span>
<span class="sd">        ValueError: If `labels` dim is different from `logits`.</span>
<span class="sd">        ValueError: If `labels` channel is not 1 and `labels` shape is different from `logits`.</span>
<span class="sd">        ValueError: If `reduction` is not one of &#39;none&#39;, &#39;mean&#39;, &#39;sum&#39;.</span>
<span class="sd">        ValueError: If the value of `labels` is not in the range [-:math:`C`, :math:`C`).</span>
<span class="sd">                    Where :math:`C` is the number of classes in logits.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; logits = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)</span>
<span class="sd">        &gt;&gt;&gt; labels = Tensor([[1], [1], [0]], mstype.int32)</span>
<span class="sd">        &gt;&gt;&gt; focalloss = nn.FocalLoss(weight=Tensor([1, 2]), gamma=2.0, reduction=&#39;mean&#39;)</span>
<span class="sd">        &gt;&gt;&gt; output = focalloss(logits, labels)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        0.12516622</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">weight</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">gamma</span><span class="o">=</span><span class="mf">2.0</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize FocalLoss.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">FocalLoss</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">reduction</span><span class="o">=</span><span class="n">reduction</span><span class="p">)</span>

        <span class="c1"># weight, when given, must be a 1-D Tensor of per-class rescaling factors.</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;gamma&quot;</span><span class="p">,</span> <span class="n">gamma</span><span class="p">,</span> <span class="p">[</span><span class="nb">float</span><span class="p">])</span>
        <span class="k">if</span> <span class="n">weight</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;weight&#39; should be a Tensor, &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">weight</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">ndim</span> <span class="o">!=</span> <span class="mi">1</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;weight&#39; should be 1, but got </span><span class="si">{</span><span class="n">weight</span><span class="o">.</span><span class="n">ndim</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">weight</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">expand_dims</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ExpandDims</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">gather_d</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">GatherD</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">squeeze</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Squeeze</span><span class="p">(</span><span class="n">axis</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">tile</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Tile</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">cast</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Cast</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dtype</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">DType</span><span class="p">()</span>
        <span class="c1"># Log-softmax over the class axis (axis 1).</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">logsoftmax</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">LogSoftmax</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="n">labels</span><span class="p">):</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;logits&#39;</span><span class="p">,</span> <span class="n">logits</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_is_tensor</span><span class="p">(</span><span class="s1">&#39;labels&#39;</span><span class="p">,</span> <span class="n">labels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">labelss</span> <span class="o">=</span> <span class="n">labels</span>
        <span class="n">_check_ndim</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">ndim</span><span class="p">,</span> <span class="n">labelss</span><span class="o">.</span><span class="n">ndim</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_channel_and_shape</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">labelss</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="n">_check_input_dtype</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dtype</span><span class="p">(</span><span class="n">labelss</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>

        <span class="c1"># Collapse any spatial dims so both tensors are (B, C, N); 2-D inputs get a trailing axis.</span>
        <span class="k">if</span> <span class="n">logits</span><span class="o">.</span><span class="n">ndim</span> <span class="o">&gt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="n">logits</span> <span class="o">=</span> <span class="n">logits</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="n">logits</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">logits</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span>
            <span class="n">labelss</span> <span class="o">=</span> <span class="n">labelss</span><span class="o">.</span><span class="n">view</span><span class="p">(</span><span class="n">labelss</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">labelss</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">logits</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">expand_dims</span><span class="p">(</span><span class="n">logits</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
            <span class="n">labelss</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">expand_dims</span><span class="p">(</span><span class="n">labelss</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>

        <span class="n">log_probability</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">logsoftmax</span><span class="p">(</span><span class="n">logits</span><span class="p">)</span>

        <span class="c1"># Sparse labels (channel == 1): gather the log-probability of each target class.</span>
        <span class="k">if</span> <span class="n">labels</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
            <span class="n">log_probability</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">gather_d</span><span class="p">(</span><span class="n">log_probability</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">labelss</span><span class="p">,</span> <span class="n">mindspore</span><span class="o">.</span><span class="n">int32</span><span class="p">))</span>
            <span class="n">log_probability</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">squeeze</span><span class="p">(</span><span class="n">log_probability</span><span class="p">)</span>

        <span class="n">probability</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">exp</span><span class="p">(</span><span class="n">log_probability</span><span class="p">)</span>

        <span class="c1"># Per-class rescaling weights, broadcast and tiled to (B, C, N).</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">convert_weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">[</span><span class="kc">None</span><span class="p">,</span> <span class="p">:,</span> <span class="kc">None</span><span class="p">]</span>
            <span class="n">convert_weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">tile</span><span class="p">(</span><span class="n">convert_weight</span><span class="p">,</span> <span class="p">(</span><span class="n">labelss</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="n">labelss</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">2</span><span class="p">]))</span>
            <span class="k">if</span> <span class="n">labels</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
                <span class="n">convert_weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">gather_d</span><span class="p">(</span><span class="n">convert_weight</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cast</span><span class="p">(</span><span class="n">labelss</span><span class="p">,</span> <span class="n">mindspore</span><span class="o">.</span><span class="n">int32</span><span class="p">))</span>
                <span class="n">convert_weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">squeeze</span><span class="p">(</span><span class="n">convert_weight</span><span class="p">)</span>
            <span class="n">log_probability</span> <span class="o">=</span> <span class="n">log_probability</span> <span class="o">*</span> <span class="n">convert_weight</span>

        <span class="c1"># Focal modulation: (1 - p)^gamma down-weights well-classified examples.</span>
        <span class="n">weight</span> <span class="o">=</span> <span class="n">F</span><span class="o">.</span><span class="n">pows</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span> <span class="o">*</span> <span class="n">probability</span> <span class="o">+</span> <span class="mf">1.0</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">labels</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
            <span class="n">loss</span> <span class="o">=</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span> <span class="o">*</span> <span class="n">weight</span> <span class="o">*</span> <span class="n">log_probability</span><span class="p">)</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">axis</span><span class="o">=</span><span class="mi">1</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">loss</span> <span class="o">=</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span> <span class="o">*</span> <span class="n">weight</span> <span class="o">*</span> <span class="n">labelss</span> <span class="o">*</span> <span class="n">log_probability</span><span class="p">)</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">axis</span><span class="o">=-</span><span class="mi">1</span><span class="p">)</span>

        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_loss</span><span class="p">(</span><span class="n">loss</span><span class="p">)</span>
</pre></div>

           </div>
           
          </div>
          <footer>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>