

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" />
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  
  <title>mindspore.ops.operations.array_ops &mdash; MindSpore master documentation</title>
  

  
  <link rel="stylesheet" href="../../../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/pygments.css" type="text/css" />

  
  

  
  

  

  
  <!--[if lt IE 9]>
    <script src="../../../../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
        <script src="../../../../_static/jquery.js"></script>
        <script src="../../../../_static/underscore.js"></script>
        <script src="../../../../_static/doctools.js"></script>
        <script src="../../../../_static/language_data.js"></script>
        <script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../../../../_static/js/theme.js"></script>

    
    <link rel="index" title="Index" href="../../../../genindex.html" />
    <link rel="search" title="Search" href="../../../../search.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../../../index.html" class="icon icon-home"> MindSpore
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.boost.html">mindspore.boost</a></li>
</ul>
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../../../../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../../../index.html" class="icon icon-home" aria-label="Home"></a> &raquo;</li>
        
          <li><a href="../../../index.html">Module code</a> &raquo;</li>
        
      <li>mindspore.ops.operations.array_ops</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <h1>Source code for mindspore.ops.operations.array_ops</h1><div class="highlight"><pre>
<span></span><span class="c1"># coding: utf-8</span>

<span class="c1"># Copyright 2020-2022 Huawei Technologies Co., Ltd</span>
<span class="c1">#</span>
<span class="c1"># Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);</span>
<span class="c1"># you may not use this file except in compliance with the License.</span>
<span class="c1"># You may obtain a copy of the License at</span>
<span class="c1">#</span>
<span class="c1"># http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="c1">#</span>
<span class="c1"># Unless required by applicable law or agreed to in writing, software</span>
<span class="c1"># distributed under the License is distributed on an &quot;AS IS&quot; BASIS,</span>
<span class="c1"># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="c1"># See the License for the specific language governing permissions and</span>
<span class="c1"># limitations under the License.</span>
<span class="c1"># ============================================================================</span>

<span class="sd">&quot;&quot;&quot;Operators for array.&quot;&quot;&quot;</span>
<span class="kn">import</span> <span class="nn">copy</span>
<span class="kn">import</span> <span class="nn">functools</span>
<span class="kn">import</span> <span class="nn">itertools</span>
<span class="kn">import</span> <span class="nn">numbers</span>
<span class="kn">from</span> <span class="nn">collections</span> <span class="kn">import</span> <span class="n">Counter</span>

<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>

<span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">log</span> <span class="k">as</span> <span class="n">logger</span>
<span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">context</span>
<span class="kn">from</span> <span class="nn">mindspore.common.initializer</span> <span class="kn">import</span> <span class="n">Zero</span>
<span class="kn">from</span> <span class="nn">..</span> <span class="kn">import</span> <span class="n">signature</span> <span class="k">as</span> <span class="n">sig</span>
<span class="kn">from</span> <span class="nn">.._utils</span> <span class="kn">import</span> <span class="n">get_broadcast_shape</span><span class="p">,</span> <span class="n">is_shape_unknown</span>
<span class="kn">from</span> <span class="nn">.._utils</span> <span class="kn">import</span> <span class="n">get_concat_offset</span>
<span class="kn">from</span> <span class="nn">..operations.math_ops</span> <span class="kn">import</span> <span class="n">_infer_shape_reduce</span>
<span class="kn">from</span> <span class="nn">..primitive</span> <span class="kn">import</span> <span class="n">Primitive</span><span class="p">,</span> <span class="n">PrimitiveWithInfer</span><span class="p">,</span> <span class="n">PrimitiveWithCheck</span><span class="p">,</span> <span class="n">prim_attr_register</span><span class="p">,</span> <span class="n">_run_op</span>
<span class="kn">from</span> <span class="nn">..._checkparam</span> <span class="kn">import</span> <span class="n">Rel</span>
<span class="kn">from</span> <span class="nn">..._checkparam</span> <span class="kn">import</span> <span class="n">Validator</span> <span class="k">as</span> <span class="n">validator</span>
<span class="kn">from</span> <span class="nn">..._checkparam</span> <span class="kn">import</span> <span class="n">_check_3d_int_or_tuple</span>
<span class="kn">from</span> <span class="nn">...common</span> <span class="kn">import</span> <span class="n">dtype</span> <span class="k">as</span> <span class="n">mstype</span>
<span class="kn">from</span> <span class="nn">...common._decorator</span> <span class="kn">import</span> <span class="n">deprecated</span>
<span class="kn">from</span> <span class="nn">...common.parameter</span> <span class="kn">import</span> <span class="n">Parameter</span>
<span class="kn">from</span> <span class="nn">...common.tensor</span> <span class="kn">import</span> <span class="n">Tensor</span>
<span class="kn">from</span> <span class="nn">..._c_expression</span> <span class="kn">import</span> <span class="n">Tensor</span> <span class="k">as</span> <span class="n">Tensor_</span>


<span class="k">class</span> <span class="nc">_ScatterOp</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Defines Scatter operators</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__mindspore_signature__</span> <span class="o">=</span> <span class="p">(</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">sig</span><span class="o">.</span><span class="n">sig_rw</span><span class="o">.</span><span class="n">RW_WRITE</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T1</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;updates&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
    <span class="p">)</span>

    <span class="k">def</span> <span class="nf">_check_scatter_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">indices_shape</span> <span class="o">!=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="ow">and</span> <span class="n">updates_shape</span> <span class="ow">and</span> <span class="n">updates_shape</span> <span class="o">!=</span> <span class="n">indices_shape</span> <span class="o">+</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">1</span><span class="p">:]:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="s2">&#39;, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape = indices_shape + x_shape[1:], but got x_shape: </span><span class="si">{</span><span class="n">x_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2">, updates_shape: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize _ScatterOp&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_scatter_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;x&quot;</span><span class="p">:</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span>


<span class="k">class</span> <span class="nc">_ScatterOpDynamic</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Defines Scatter operators with dynamic shape</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">__mindspore_signature__</span> <span class="o">=</span> <span class="p">(</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">sig</span><span class="o">.</span><span class="n">sig_rw</span><span class="o">.</span><span class="n">RW_WRITE</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T1</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;updates&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
    <span class="p">)</span>

    <span class="k">def</span> <span class="nf">_check_scatter_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">):</span>
        <span class="c1"># x_shape cannot be dynamic</span>
        <span class="k">if</span> <span class="n">np</span><span class="o">.</span><span class="n">any</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span> <span class="o">==</span> <span class="o">-</span><span class="mi">1</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;input_x&#39; does not support dynamic shape, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got the shape of &#39;input_x&#39; is </span><span class="si">{</span><span class="n">x_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="c1"># support indices and updates dynamic</span>
        <span class="k">if</span> <span class="n">np</span><span class="o">.</span><span class="n">any</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span> <span class="o">==</span> <span class="o">-</span><span class="mi">1</span><span class="p">)</span> <span class="ow">or</span> <span class="n">np</span><span class="o">.</span><span class="n">any</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">updates_shape</span><span class="p">)</span> <span class="o">==</span> <span class="o">-</span><span class="mi">1</span><span class="p">):</span>
            <span class="k">pass</span>
        <span class="k">elif</span> <span class="n">indices_shape</span> <span class="o">!=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="ow">and</span> <span class="n">updates_shape</span> <span class="ow">and</span> <span class="n">updates_shape</span> <span class="o">!=</span> <span class="n">indices_shape</span> <span class="o">+</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">1</span><span class="p">:]:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="s2">&#39;, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape = indices_shape + x_shape[1:], but got x_shape: </span><span class="si">{</span><span class="n">x_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2">, updates_shape: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize _ScatterOpDynamic&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">check_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_check_scatter_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">check_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;x&quot;</span><span class="p">:</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">_ScatterNdOp</span><span class="p">(</span><span class="n">_ScatterOp</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Defines _ScatterNd operators</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">_check_scatter_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;the dimension of x&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span>
                        <span class="s1">&#39;the dimension of indices&#39;</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:]</span> <span class="o">!=</span> <span class="n">updates_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="s2">&#39;, updates_shape = &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;indices_shape[:-1] + x_shape[indices_shape[-1]:], but got x_shape: </span><span class="si">{</span><span class="n">x_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2">, updates_shape: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>


<!-- _check_infer_attr_reduce: validates reduce-op attributes — keep_dims must be bool; axis must be
     an int or a tuple, and when a tuple, every element is individually checked to be an int. -->
<span class="k">def</span> <span class="nf">_check_infer_attr_reduce</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="n">keep_dims</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">):</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;keep_dims&#39;</span><span class="p">,</span> <span class="n">keep_dims</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="n">prim_name</span><span class="p">)</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="n">prim_name</span><span class="p">)</span>
    <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
        <span class="k">for</span> <span class="n">index</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">axis</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis[</span><span class="si">%d</span><span class="s1">]&#39;</span> <span class="o">%</span> <span class="n">index</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="n">prim_name</span><span class="p">)</span>


<!-- Pygments-rendered source of ExpandDims (PrimitiveWithInfer): __infer__ checks that x is a
     tensor, validates axis in [-rank-1, rank], constant-folds when x['value'] is known
     (np.expand_dims), normalizes a negative axis, and inserts a size-1 dim into the shape. -->
<div class="viewcode-block" id="ExpandDims"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ExpandDims.html#mindspore.ops.ExpandDims">[docs]</a><span class="k">class</span> <span class="nc">ExpandDims</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Adds an additional dimension to `input_x` at the given axis.</span>

<span class="sd">    Note:</span>
<span class="sd">        If the specified axis is a negative number, the index is counted</span>
<span class="sd">        backward from the end and starts at 1.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **axis** (int) - Specifies the dimension index at which to expand</span>
<span class="sd">          the shape of `input_x`. The value of axis must be in the range</span>
<span class="sd">          `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the</span>
<span class="sd">        value of `axis` is 0. It has the same data type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If `axis` is not an int or not in the valid range.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; expand_dims = ops.ExpandDims()</span>
<span class="sd">        &gt;&gt;&gt; output = expand_dims(input_tensor, 0)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[2. 2.]</span>
<span class="sd">          [2. 2.]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ExpandDims&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;axis&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">axis</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">axis_v</span> <span class="o">=</span> <span class="n">axis</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis_v</span><span class="p">,</span> <span class="o">-</span><span class="n">rank</span> <span class="o">-</span> <span class="mi">1</span><span class="p">,</span> <span class="n">rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_BOTH</span><span class="p">,</span> <span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">value</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">expand_dims</span><span class="p">(</span><span class="n">value</span><span class="p">,</span> <span class="n">axis_v</span><span class="p">)</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">value</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">axis_v</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="n">axis_v</span> <span class="o">=</span> <span class="n">rank</span> <span class="o">+</span> <span class="mi">1</span> <span class="o">+</span> <span class="n">axis_v</span>
        <span class="n">x_shape</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="n">axis_v</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">x_shape</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>
<!-- NOTE(review): below, out['min_shape'] / out['max_shape'] alias x's lists and are then mutated
     in place by insert(); presumably intentional in the upstream source — confirm there before
     relying on this rendering. This page is generated; report code issues upstream, not here. -->
        <span class="k">if</span> <span class="s1">&#39;min_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span> <span class="ow">and</span> <span class="s1">&#39;max_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="n">axis_v</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="n">axis_v</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">out</span></div>


<!-- Pygments-rendered source of DType (Primitive): no-op __init__; the primitive's behavior
     (returning the input tensor's mindspore.dtype) is implemented outside this class body. -->
<div class="viewcode-block" id="DType"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.DType.html#mindspore.ops.DType">[docs]</a><span class="k">class</span> <span class="nc">DType</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the data type of the input tensor as mindspore.dtype.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        mindspore.dtype, the data type of a tensor.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.DType()(input_tensor)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        Float32</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize DType&quot;&quot;&quot;</span></div>


<!-- Pygments-rendered source of SameTypeShape (PrimitiveWithInfer): __call__ is the PyNative
     path (checks concrete Tensors), __infer__ is the graph-mode path (checks abstract dtype/shape
     dicts); both assert dtype equality (TypeError) and shape equality, then pass x through. -->
<div class="viewcode-block" id="SameTypeShape"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.SameTypeShape.html#mindspore.ops.SameTypeShape">[docs]</a><span class="k">class</span> <span class="nc">SameTypeShape</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Checks whether the data type and shape of two tensors are the same.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **input_y** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_S)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`,</span>
<span class="sd">        if data type and shape of `input_x` and `input_y` are the same.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If the data types of `input_x` and `input_y` are not the same.</span>
<span class="sd">        ValueError: If the shapes of `input_x` and `input_y` are not the same.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.SameTypeShape()(input_x, input_y)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[2. 2.]</span>
<span class="sd">         [2. 2.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Same&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;run in PyNative mode&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;y&#39;</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x dtype&#39;</span><span class="p">,</span> <span class="n">x</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="s1">&#39;y dtype&#39;</span><span class="p">,</span> <span class="n">y</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="ne">TypeError</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x shape&#39;</span><span class="p">,</span> <span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="s1">&#39;y shape&#39;</span><span class="p">,</span> <span class="n">y</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s1">&#39;y&#39;</span><span class="p">,</span> <span class="n">y</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x dtype&#39;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="s1">&#39;y dtype&#39;</span><span class="p">,</span> <span class="n">y</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="ne">TypeError</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x shape&#39;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">],</span> <span class="s1">&#39;y shape&#39;</span><span class="p">,</span> <span class="n">y</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span></div>


<!-- Pygments-rendered source of Cast (PrimitiveWithInfer): check_elim short-circuits no-op casts
     (Parameter/Tensor already at target dtype, or plain Number folded into a Tensor); __infer__
     records DstT/SrcT/dst_type attrs and constant-folds known values via numpy astype. Note this
     class has no viewcode anchor div, unlike its siblings on this page. -->
<span class="k">class</span> <span class="nc">Cast</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a tensor with the new specified data type.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          The tensor to be cast.</span>
<span class="sd">        - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is neither Tensor nor Number.</span>
<span class="sd">        TypeError: If `type` is not a Number.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(input_np)</span>
<span class="sd">        &gt;&gt;&gt; type_dst = mindspore.int32</span>
<span class="sd">        &gt;&gt;&gt; cast = ops.Cast()</span>
<span class="sd">        &gt;&gt;&gt; output = cast(input_x, type_dst)</span>
<span class="sd">        &gt;&gt;&gt; print(output.dtype)</span>
<span class="sd">        Int32</span>
<span class="sd">        &gt;&gt;&gt; print(output.shape)</span>
<span class="sd">        (2, 3, 4, 5)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="c1"># if primitive need setattr in __infer__ need add this flag</span>
        <span class="sd">&quot;&quot;&quot;Initialize Cast&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;dst_type&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">check_elim</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">dtype</span><span class="p">):</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="p">(</span><span class="n">Tensor</span><span class="p">,</span> <span class="n">numbers</span><span class="o">.</span><span class="n">Number</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">)):</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">):</span>
                <span class="n">data</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">data</span>
                <span class="k">if</span> <span class="n">data</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">dtype</span><span class="p">:</span>
                    <span class="k">return</span> <span class="p">(</span><span class="kc">True</span><span class="p">,</span> <span class="n">x</span><span class="p">)</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">)</span> <span class="ow">and</span> <span class="n">x</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">dtype</span><span class="p">:</span>
                <span class="n">x</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
                <span class="n">x</span><span class="o">.</span><span class="n">set_cast_dtype</span><span class="p">()</span>
                <span class="k">return</span> <span class="p">(</span><span class="kc">True</span><span class="p">,</span> <span class="n">x</span><span class="p">)</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">numbers</span><span class="o">.</span><span class="n">Number</span><span class="p">):</span>
                <span class="k">return</span> <span class="p">(</span><span class="kc">True</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">))</span>
        <span class="k">return</span> <span class="p">(</span><span class="kc">False</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">t</span><span class="p">):</span>
        <span class="n">src_type</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">dst_type</span> <span class="o">=</span> <span class="n">t</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>

        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">src_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;type&quot;</span><span class="p">,</span> <span class="n">dst_type</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">src_type</span><span class="p">,</span> <span class="nb">type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">)):</span>
            <span class="n">src_type</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">element_type</span><span class="p">()</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">dst_type</span><span class="p">,</span> <span class="nb">type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">)):</span>
            <span class="n">dst_type</span> <span class="o">=</span> <span class="n">dst_type</span><span class="o">.</span><span class="n">element_type</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;DstT&#39;</span><span class="p">,</span> <span class="n">dst_type</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;SrcT&#39;</span><span class="p">,</span> <span class="n">src_type</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;dst_type&#39;</span><span class="p">,</span> <span class="n">dst_type</span><span class="p">)</span>

        <span class="n">value</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">np_dst_type</span> <span class="o">=</span> <span class="n">mstype</span><span class="o">.</span><span class="n">dtype_to_nptype</span><span class="p">(</span><span class="n">dst_type</span><span class="p">)</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="p">(</span><span class="nb">int</span><span class="p">,</span> <span class="nb">float</span><span class="p">)):</span>
                <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np_dst_type</span><span class="p">))</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np_dst_type</span><span class="p">))</span>

        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">],</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">t</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]),</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>
        <span class="k">if</span> <span class="s1">&#39;min_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span> <span class="ow">and</span> <span class="s1">&#39;max_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span>
        <span class="k">return</span> <span class="n">out</span>


<div class="viewcode-block" id="IsSubClass"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.IsSubClass.html#mindspore.ops.IsSubClass">[docs]</a><span class="k">class</span> <span class="nc">IsSubClass</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Checks whether this type is a sub-class of another type.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **sub_type** (mindspore.dtype) - The type to be checked. Only constant value is allowed.</span>
<span class="sd">        - **type_** (mindspore.dtype) - The target type. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        bool, the check result.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `sub_type` or `type_` is not a Type.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; output = ops.IsSubClass()(mindspore.int32,  mindspore.intc)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        True</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">pass</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">sub_type</span><span class="p">,</span> <span class="n">type_</span><span class="p">):</span>
        <span class="n">sub_type_t</span> <span class="o">=</span> <span class="n">sub_type</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">type_v</span> <span class="o">=</span> <span class="n">type_</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>

        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;sub_type&quot;</span><span class="p">,</span> <span class="n">sub_type_t</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">Type</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;type_&quot;</span><span class="p">,</span> <span class="n">type_v</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">Type</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="n">value</span> <span class="o">=</span> <span class="n">mstype</span><span class="o">.</span><span class="n">issubclass_</span><span class="p">(</span><span class="n">sub_type_t</span><span class="p">,</span> <span class="n">type_v</span><span class="p">)</span>

        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="p">(),</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">type_type</span><span class="p">,</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<div class="viewcode-block" id="IsInstance"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.IsInstance.html#mindspore.ops.IsInstance">[docs]</a><span class="k">class</span> <span class="nc">IsInstance</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Checks whether an object is an instance of a target type.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **inst** (Any Object) - The instance to be checked. Only constant value is allowed.</span>
<span class="sd">        - **type_** (mindspore.dtype) - The target type. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        bool, the check result.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `type_` is not a Type.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; inst = 1</span>
<span class="sd">        &gt;&gt;&gt; output = ops.IsInstance()(inst, mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        False</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">pass</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inst</span><span class="p">,</span> <span class="n">type_</span><span class="p">):</span>
        <span class="n">sub_type_t</span> <span class="o">=</span> <span class="n">inst</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">type_v</span> <span class="o">=</span> <span class="n">type_</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>

        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;type_&quot;</span><span class="p">,</span> <span class="n">type_v</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">Type</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">type_v</span> <span class="o">==</span> <span class="n">mstype</span><span class="o">.</span><span class="n">list_</span><span class="p">:</span>
            <span class="n">value</span> <span class="o">=</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">sub_type_t</span><span class="p">,</span> <span class="nb">list</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">type_v</span> <span class="o">==</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tuple_</span><span class="p">:</span>
            <span class="n">value</span> <span class="o">=</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">sub_type_t</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">mstype</span><span class="o">.</span><span class="n">issubclass_</span><span class="p">(</span><span class="n">sub_type_t</span><span class="p">,</span> <span class="n">type_v</span><span class="p">)</span>

        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="p">(),</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">type_type</span><span class="p">,</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<span class="k">class</span> <span class="nc">Reshape</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Reshapes the input tensor with the same values based on a given shape tuple.</span>

<span class="sd">    The &#39;input_shape&#39; can only have one -1 at most, in which case it’s inferred from the remaining dimensions and</span>
<span class="sd">    the number of elements in the input.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **input_shape** (tuple[int]) - The input tuple is constructed by multiple</span>
<span class="sd">          integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: Given a shape tuple, if it contains more than one -1; or if the product</span>
<span class="sd">            of its elements is less than or equal to 0; or if it does not divide the product</span>
<span class="sd">            of the input tensor shape; or if it does not match the input&#39;s array size.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; reshape = ops.Reshape()</span>
<span class="sd">        &gt;&gt;&gt; output = reshape(input_x, (3, 2))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[-0.1  0.3]</span>
<span class="sd">         [ 3.6  0.4]</span>
<span class="sd">         [ 0.5 -3.2]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Reshape&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;tensor&#39;</span><span class="p">,</span> <span class="s1">&#39;shape&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">_get_shape_and_range</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">shape</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot; get min and max shape when output shape is dynamic&quot;&quot;&quot;</span>
        <span class="n">min_shape</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="n">max_shape</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="n">is_shape_unknown</span><span class="p">(</span><span class="n">shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]):</span>
            <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="mi">2</span><span class="p">]</span>
            <span class="k">return</span> <span class="n">out_shape</span><span class="p">,</span> <span class="n">min_shape</span><span class="p">,</span> <span class="n">max_shape</span>

        <span class="n">shape_rank</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">x_shp</span><span class="p">:</span>
            <span class="c1"># x is a scalar, output shape fixed</span>
            <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">shape_rank</span>
            <span class="k">return</span> <span class="n">out_shape</span><span class="p">,</span> <span class="n">min_shape</span><span class="p">,</span> <span class="n">max_shape</span>

        <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">shape_rank</span>
        <span class="k">if</span> <span class="s2">&quot;max_value&quot;</span> <span class="ow">in</span> <span class="n">shape</span> <span class="ow">and</span> <span class="s2">&quot;min_value&quot;</span> <span class="ow">in</span> <span class="n">shape</span><span class="p">:</span>
            <span class="n">min_shape</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s2">&quot;min_value&quot;</span><span class="p">]</span>
            <span class="n">max_shape</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s2">&quot;max_value&quot;</span><span class="p">]</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">min_shape</span><span class="p">)</span> <span class="o">!=</span> <span class="n">shape_rank</span> <span class="ow">or</span> <span class="nb">len</span><span class="p">(</span><span class="n">max_shape</span><span class="p">)</span> <span class="o">!=</span> <span class="n">shape_rank</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;The primitive[Reshape]&#39;s input[shape] min or max value does not match the shape rank.&quot;</span><span class="p">)</span>
            <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">shape_rank</span><span class="p">):</span>
                <span class="k">if</span> <span class="n">min_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">==</span> <span class="n">max_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]:</span>
                    <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">min_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
        <span class="k">elif</span> <span class="n">is_shape_unknown</span><span class="p">(</span><span class="n">x_shp</span><span class="p">)</span> <span class="ow">and</span> <span class="s2">&quot;max_shape&quot;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="c1"># when dynamic memory allocation is supported, max_shape can be left out</span>
            <span class="n">min_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">shape_rank</span>
            <span class="n">max_shape</span> <span class="o">=</span> <span class="p">[</span><span class="nb">int</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">prod</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s2">&quot;max_shape&quot;</span><span class="p">]))]</span> <span class="o">*</span> <span class="n">shape_rank</span>
        <span class="k">return</span> <span class="n">out_shape</span><span class="p">,</span> <span class="n">min_shape</span><span class="p">,</span> <span class="n">max_shape</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">shape</span><span class="p">):</span>
        <span class="n">shape_v</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># for shape is not constant</span>
        <span class="k">if</span> <span class="n">shape_v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">out_shape</span><span class="p">,</span> <span class="n">min_shape</span><span class="p">,</span> <span class="n">max_shape</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_shape_and_range</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">shape</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">is_shape_unknown</span><span class="p">(</span><span class="n">out_shape</span><span class="p">):</span>
                <span class="c1"># `min_shape` and `max_shape` can&#39;t be None before dynamic memory allocation is supported</span>
                <span class="n">shape_shp</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
                <span class="n">shape_rank</span> <span class="o">=</span> <span class="mi">1</span> <span class="k">if</span> <span class="n">is_shape_unknown</span><span class="p">(</span><span class="n">shape_shp</span><span class="p">)</span> <span class="k">else</span> <span class="n">shape_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
                <span class="n">min_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">shape_rank</span> <span class="k">if</span> <span class="n">min_shape</span> <span class="ow">is</span> <span class="kc">None</span> <span class="k">else</span> <span class="n">min_shape</span>
                <span class="n">max_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">shape_rank</span> <span class="k">if</span> <span class="n">max_shape</span> <span class="ow">is</span> <span class="kc">None</span> <span class="k">else</span> <span class="n">max_shape</span>
            <span class="k">return</span> <span class="p">{</span>
                <span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shape</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
                <span class="s1">&#39;max_shape&#39;</span><span class="p">:</span> <span class="n">max_shape</span><span class="p">,</span>
                <span class="s1">&#39;min_shape&#39;</span><span class="p">:</span> <span class="n">min_shape</span>
            <span class="p">}</span>

        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">shape_v</span><span class="p">,</span> <span class="n">Tensor_</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">shape_v</span> <span class="o">=</span> <span class="n">shape_v</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span><span class="o">.</span><span class="n">tolist</span><span class="p">()</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">shape_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">shape_v</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">shape_v</span><span class="p">)</span>

        <span class="n">neg_index</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
        <span class="n">dim_prod</span> <span class="o">=</span> <span class="mi">1</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">shp_i</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">shape_v</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape[</span><span class="si">%d</span><span class="s2">]&quot;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">shp_i</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">shp_i</span> <span class="o">==</span> <span class="o">-</span><span class="mi">1</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">neg_index</span> <span class="o">!=</span> <span class="o">-</span><span class="mi">1</span><span class="p">:</span>
                    <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, there can be at most one &#39;-1&#39; in &#39;input_shape&#39;, &quot;</span>
                                     <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">shape_v</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
                <span class="n">neg_index</span> <span class="o">=</span> <span class="n">i</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">dim_prod</span> <span class="o">*=</span> <span class="n">shp_i</span>

        <span class="k">if</span> <span class="n">is_shape_unknown</span><span class="p">(</span><span class="n">x_shp</span><span class="p">):</span>
            <span class="k">if</span> <span class="s1">&#39;max_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
                <span class="n">x_max_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">x_max_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
            <span class="k">if</span> <span class="s1">&#39;min_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
                <span class="n">x_min_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">x_min_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
            <span class="n">max_arr_prod</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">prod</span><span class="p">(</span><span class="n">x_max_shape</span><span class="p">)</span>
            <span class="n">min_arr_prod</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">prod</span><span class="p">(</span><span class="n">x_min_shape</span><span class="p">)</span>
            <span class="n">max_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">shape_v</span><span class="p">)</span>
            <span class="n">min_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">shape_v</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">neg_index</span> <span class="o">!=</span> <span class="o">-</span><span class="mi">1</span><span class="p">:</span>
                <span class="n">max_shape</span><span class="p">[</span><span class="n">neg_index</span><span class="p">]</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">max_arr_prod</span> <span class="o">/</span> <span class="n">dim_prod</span><span class="p">)</span>
                <span class="n">min_shape</span><span class="p">[</span><span class="n">neg_index</span><span class="p">]</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">min_arr_prod</span> <span class="o">/</span> <span class="n">dim_prod</span><span class="p">)</span>

            <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">shape_v</span><span class="p">,</span>
                   <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                   <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
                   <span class="s1">&#39;max_shape&#39;</span><span class="p">:</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">max_shape</span><span class="p">),</span>
                   <span class="s1">&#39;min_shape&#39;</span><span class="p">:</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">min_shape</span><span class="p">)}</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">arr_prod</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">prod</span><span class="p">(</span><span class="n">x_shp</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">dim_prod</span> <span class="o">&lt;=</span> <span class="mi">0</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;input_x&#39; is </span><span class="si">{</span><span class="n">x_shp</span><span class="si">}</span><span class="s2">, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;the value of &#39;input_shape&#39; is </span><span class="si">{</span><span class="n">shape_v</span><span class="si">}</span><span class="s2">. &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;The product of &#39;input_shape&#39; should &gt; 0, but got </span><span class="si">{</span><span class="n">dim_prod</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">neg_index</span> <span class="o">!=</span> <span class="o">-</span><span class="mi">1</span><span class="p">:</span>
                <span class="n">shape_v</span><span class="p">[</span><span class="n">neg_index</span><span class="p">]</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">arr_prod</span> <span class="o">/</span> <span class="n">dim_prod</span><span class="p">)</span>
                <span class="n">dim_prod</span> <span class="o">*=</span> <span class="n">shape_v</span><span class="p">[</span><span class="n">neg_index</span><span class="p">]</span>
            <span class="k">if</span> <span class="n">dim_prod</span> <span class="o">!=</span> <span class="n">arr_prod</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;input_x&#39; is </span><span class="si">{</span><span class="n">x_shp</span><span class="si">}</span><span class="s2">, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;the value of &#39;input_shape&#39; value is </span><span class="si">{</span><span class="n">shape_v</span><span class="si">}</span><span class="s2">. &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;The product of the shape of &#39;input_x&#39; should be equal to product of &#39;input_shape&#39;, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but product of the shape of &#39;input_x&#39; is </span><span class="si">{</span><span class="n">arr_prod</span><span class="si">}</span><span class="s2">, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;product of &#39;input_shape&#39; is </span><span class="si">{</span><span class="n">dim_prod</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
            <span class="n">value</span> <span class="o">=</span> <span class="kc">None</span>
            <span class="k">if</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
                <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">shape_v</span><span class="p">))</span>

            <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">shape_v</span><span class="p">),</span>
                   <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                   <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<div class="viewcode-block" id="Shape"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Shape.html#mindspore.ops.Shape">[docs]</a><span class="k">class</span> <span class="nc">Shape</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the shape of the input tensor, which is a static shape.</span>

<span class="sd">    Static shape: a shape that can be obtained without running the graph. It is an inherent property of a tensor and</span>
<span class="sd">    may be unknown. The static shape information can be supplemented by manual setting.</span>
<span class="sd">    The static shape is not affected by the input of the graph.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        tuple[int], the output tuple is constructed by multiple integers,</span>
<span class="sd">        :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; shape = ops.Shape()</span>
<span class="sd">        &gt;&gt;&gt; output = shape(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (3, 2, 1)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Shape&quot;&quot;&quot;</span></div>


<span class="k">class</span> <span class="nc">DynamicShape</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the shape of the input tensor, which is a dynamic shape.</span>

<span class="sd">    Note:</span>
<span class="sd">        Dynamic shape: After the graph is running, as the tensor flows in the graph, the specific shape of the tensor</span>
<span class="sd">        on each node on the graph can be inferred according to the structure of the graph.</span>
<span class="sd">        This shape is called a dynamic shape. As the input shape of the graph is different,</span>
<span class="sd">        the dynamic shape of the tensor in the graph will change.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor[int], 1-dim Tensor of type int32</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; shape = ops.DynamicShape()</span>
<span class="sd">        &gt;&gt;&gt; output = shape(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [3 2 1]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize DynamicShape&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;tensor&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;is_dynamic_shape&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">Squeeze</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a tensor with the same data type but dimensions of 1 are removed based on `axis`.</span>

<span class="sd">    If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.</span>
<span class="sd">    If `axis` is None, it will remove all the dimensions of size 1.</span>
<span class="sd">    For example, if input is of shape: (A×1×B×C×1×D), then the out tensor will be of shape: (A×B×C×D);</span>
<span class="sd">    When `axis` is given, a squeeze operation is done only in the given dimension.</span>
<span class="sd">    If input is of shape: (A×1×B), squeeze(input, 0) leaves the tensor unchanged,</span>
<span class="sd">    but squeeze(input, 1) will squeeze the tensor to the shape (A×B).</span>

<span class="sd">    Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,</span>
<span class="sd">    and there is no Tensor data copy process.</span>

<span class="sd">    Note:</span>
<span class="sd">        The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove</span>
<span class="sd">            all the dimensions that are equal to 1. If specified, it must be int32 or int64.</span>
<span class="sd">            Default: (), an empty tuple.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is neither an int nor tuple.</span>
<span class="sd">        TypeError: If `axis` is a tuple whose elements are not all int.</span>
<span class="sd">        ValueError: If the corresponding dimension of the specified axis isn&#39;t equal to 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; squeeze = ops.Squeeze(2)</span>
<span class="sd">        &gt;&gt;&gt; output = squeeze(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1.]</span>
<span class="sd">         [1. 1.]</span>
<span class="sd">         [1. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="p">()):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Squeeze&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
            <span class="k">for</span> <span class="n">idx</span><span class="p">,</span> <span class="n">item</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">axis</span><span class="p">):</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis[</span><span class="si">%d</span><span class="s2">]&quot;</span> <span class="o">%</span> <span class="n">idx</span><span class="p">,</span> <span class="n">item</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="p">(</span><span class="n">axis</span><span class="p">,)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="p">(</span><span class="n">axis</span><span class="p">,))</span>


<div class="viewcode-block" id="Transpose"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Transpose.html#mindspore.ops.Transpose">[docs]</a><span class="k">class</span> <span class="nc">Transpose</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Permutes the dimensions of the input tensor according to input permutation.</span>

<span class="sd">    For a 1-D array this has no effect, as a transposed vector is simply the same vector.</span>
<span class="sd">    To convert a 1-D array into a 2-D column vector, please refer to the class: mindspore.ops.ExpandDims.</span>
<span class="sd">    For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,</span>
<span class="sd">    their order indicates how the axes are permuted (see Examples).</span>
<span class="sd">    If axes are not provided and a.shape = (i[0], i[1], ... i[n-2], i[n-1]),</span>
<span class="sd">    then a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0]).</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **input_perm** (tuple[int]) - The permutation to be converted. The elements in `input_perm` are composed of</span>
<span class="sd">          the indexes of each dimension of `input_x`. The length of `input_perm` and the shape of `input_x` must be</span>
<span class="sd">          the same. Only constant value is allowed. Must be in the range [0, rank(input_x)).</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the</span>
<span class="sd">        shape of `input_x` and the value of `input_perm`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_perm` is not a tuple.</span>
<span class="sd">        ValueError: If length of shape of `input_x` is not equal to length of shape of `input_perm`.</span>
<span class="sd">        ValueError: If the same element exists in `input_perm`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_perm = (0, 2, 1)</span>
<span class="sd">        &gt;&gt;&gt; transpose = ops.Transpose()</span>
<span class="sd">        &gt;&gt;&gt; output = transpose(input_x, input_perm)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[ 1.  4.]</span>
<span class="sd">          [ 2.  5.]</span>
<span class="sd">          [ 3.  6.]]</span>
<span class="sd">         [[ 7. 10.]</span>
<span class="sd">          [ 8. 11.]</span>
<span class="sd">          [ 9. 12.]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Transpose&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;perm&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span></div>


<div class="viewcode-block" id="Unique"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Unique.html#mindspore.ops.Unique">[docs]</a><span class="k">class</span> <span class="nc">Unique</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the unique elements of input tensor and also return a tensor containing the index of each value of input</span>
<span class="sd">    tensor corresponding to the output unique tensor.</span>

<span class="sd">    The output contains Tensor `y` and Tensor `idx`, returned in the form of a tuple (`y`, `idx`).</span>
<span class="sd">    The shapes of Tensor `y` and Tensor `idx` are different in most cases, because Tensor `y` contains only the</span>
<span class="sd">    deduplicated elements, while the shape of Tensor `idx` is consistent with the input.</span>

<span class="sd">    To get the same shape between `idx` and `y`, please refer to the &#39;UniqueWithPad&#39; operator.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tuple, containing Tensor objects (`y`, `idx`), `y` is a tensor with the</span>
<span class="sd">        same type as `input_x`, and contains the unique elements in `x`, sorted in</span>
<span class="sd">        ascending order. `idx` is a tensor containing indices of elements in</span>
<span class="sd">        the input corresponding to the output tensor.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Unique()(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))</span>
<span class="sd">        &gt;&gt;&gt; y = output[0]</span>
<span class="sd">        &gt;&gt;&gt; print(y)</span>
<span class="sd">        [1 2 5]</span>
<span class="sd">        &gt;&gt;&gt; idx = output[1]</span>
<span class="sd">        &gt;&gt;&gt; print(idx)</span>
<span class="sd">        [0 1 2 1]</span>
<span class="sd">        &gt;&gt;&gt; # As can be seen from the above, the shapes of y and idx differ.</span>
<span class="sd">        &gt;&gt;&gt; # note that for GPU, this operator must be wrapped inside a model, and executed in graph mode.</span>
<span class="sd">        &gt;&gt;&gt; class UniqueNet(nn.Cell):</span>
<span class="sd">        ...     def __init__(self):</span>
<span class="sd">        ...         super(UniqueNet, self).__init__()</span>
<span class="sd">        ...         self.unique_op = ops.Unique()</span>
<span class="sd">        ...</span>
<span class="sd">        ...     def construct(self, x):</span>
<span class="sd">        ...         output, indices = self.unique_op(x)</span>
<span class="sd">        ...         return output, indices</span>
<span class="sd">        ...</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; net = UniqueNet()</span>
<span class="sd">        &gt;&gt;&gt; output = net(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span></div>


<span class="k">class</span> <span class="nc">Gather</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a slice of the input tensor based on the specified indices and axis.</span>

<span class="sd">    Slices the input tensor based on the indices at the specified axis. See the following example for details.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          The original Tensor.</span>
<span class="sd">        - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.</span>
<span class="sd">          Specifies the indices of elements of the original Tensor. Must be in the range</span>
<span class="sd">          `[0, input_param.shape[axis])` which are only validated on CPU. The data type can be int32 or int64.</span>
<span class="sd">        - **axis** (int) - Specifies the dimension index to gather indices.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is</span>
<span class="sd">        :math:`input\_params.shape[:axis] + input\_indices.shape + input\_params.shape[axis + 1:]`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>
<span class="sd">        TypeError: If `input_indices` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_indices = Tensor(np.array([1, 2]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; axis = 1</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Gather()(input_params, input_indices, axis)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 2.  7.]</span>
<span class="sd">         [ 4. 54.]</span>
<span class="sd">         [ 2. 55.]]</span>
<span class="sd">        &gt;&gt;&gt; axis = 0</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Gather()(input_params, input_indices, axis)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[3. 4. 54. 22.]</span>
<span class="sd">         [2. 2. 55.  3.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Gather&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;params&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;axis&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>


<span class="k">class</span> <span class="nc">GatherV2</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Same as operator Gather. GatherV2 will be deprecated in the future.</span>
<span class="sd">    Please use Gather instead.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@deprecated</span><span class="p">(</span><span class="s2">&quot;1.1&quot;</span><span class="p">,</span> <span class="s2">&quot;Gather&quot;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize GatherV2&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;params&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;axis&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__check__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">axis</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;params&quot;</span><span class="p">,</span> <span class="n">params</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;indices&quot;</span><span class="p">,</span> <span class="n">indices</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">axis_v</span> <span class="o">=</span> <span class="n">axis</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">params</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis_v</span><span class="p">,</span> <span class="o">-</span><span class="n">rank</span><span class="p">,</span> <span class="n">rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>


<div class="viewcode-block" id="SparseGatherV2"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.SparseGatherV2.html#mindspore.ops.SparseGatherV2">[docs]</a><span class="k">class</span> <span class="nc">SparseGatherV2</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a slice of input tensor based on the specified indices and axis.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.</span>
<span class="sd">          Specifies the indices of elements of the original Tensor, must be in the range</span>
<span class="sd">          `[0, input_param.shape[axis])`.</span>
<span class="sd">        - **axis** (int) - Specifies the dimension index to gather indices.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_indices = Tensor(np.array([1, 2]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; axis = 1</span>
<span class="sd">        &gt;&gt;&gt; out = ops.SparseGatherV2()(input_params, input_indices, axis)</span>
<span class="sd">        &gt;&gt;&gt; print(out)</span>
<span class="sd">        [[2. 7.]</span>
<span class="sd">         [4. 54.]</span>
<span class="sd">         [2. 55.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SparseGatherV2&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;params&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;axis&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__check__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">axis</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;params&quot;</span><span class="p">,</span> <span class="n">params</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;indices&quot;</span><span class="p">,</span> <span class="n">indices</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">axis_v</span> <span class="o">=</span> <span class="n">axis</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">params</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis_v</span><span class="p">,</span> <span class="o">-</span><span class="n">rank</span><span class="p">,</span> <span class="n">rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span></div>


<div class="viewcode-block" id="Padding"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Padding.html#mindspore.ops.Padding">[docs]</a><span class="k">class</span> <span class="nc">Padding</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Extends the last dimension of the input tensor from 1 to pad_dim_size, by filling with 0.</span>

<span class="sd">    Args:</span>
<span class="sd">        pad_dim_size (int): The value of the last dimension of `x` to be extended, which must be positive. Default: 8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The rank of `x` must be at least 2.</span>
<span class="sd">          The last dimension of `x` must be 1. The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `pad_dim_size` is not an int.</span>
<span class="sd">        ValueError: If `pad_dim_size` is less than 1.</span>
<span class="sd">        ValueError: If last dim of `x` is not equal to 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[8], [10]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; pad_dim_size = 4</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Padding(pad_dim_size)(x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 8.  0.  0.  0.]</span>
<span class="sd">         [10.  0.  0.  0.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">pad_dim_size</span><span class="o">=</span><span class="mi">8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize padding&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;pad_dim_size&quot;</span><span class="p">,</span> <span class="n">pad_dim_size</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">pad_dim_size</span><span class="p">,</span> <span class="s2">&quot;pad_dim_size&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pad_dim_size</span> <span class="o">=</span> <span class="n">pad_dim_size</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GT</span><span class="p">,</span> <span class="s2">&quot;rank of x&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">x_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="s2">&quot;last dim of x&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">x_shape</span>
        <span class="n">out_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">pad_dim_size</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shape</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<div class="viewcode-block" id="UniqueWithPad"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.UniqueWithPad.html#mindspore.ops.UniqueWithPad">[docs]</a><span class="k">class</span> <span class="nc">UniqueWithPad</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns unique elements and relative indexes in 1-D tensor, filled with padding num.</span>

<span class="sd">    The basic function is the same as the Unique operator, but the UniqueWithPad operator adds a Pad function.</span>
<span class="sd">    The returned tuple(`y`, `idx`) after the input Tensor `x` is processed by the unique operator,</span>
<span class="sd">    in which the shapes of `y` and `idx` are usually not equal. Therefore, in order to solve the above situation,</span>
<span class="sd">    the UniqueWithPad operator will fill the `y` Tensor with the `pad_num` specified by the user</span>
<span class="sd">    to make it have the same shape as the Tensor `idx`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The tensor that needs to be made unique. Must be a 1-D vector with types: int32, int64.</span>
<span class="sd">        - **pad_num** (int) - Pad num. The data type is an int.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        tuple(Tensor), tuple of 2 tensors, `y` and `idx`.</span>
<span class="sd">        - y (Tensor) - The unique elements filled with pad_num, the shape and data type same as `x`.</span>
<span class="sd">        - idx (Tensor) - The index of each value of `x` in the unique output `y`, the shape and data type same as `x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `x` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `x` is not equal to 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; pad_num = 8</span>
<span class="sd">        &gt;&gt;&gt; output = ops.UniqueWithPad()(x, pad_num)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]),</span>
<span class="sd">         Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;init UniqueWithPad&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">pad_num</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;pad_num&quot;</span><span class="p">,</span> <span class="n">pad_num</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;rank of x&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">x_shape</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="p">(</span><span class="n">out_shape</span><span class="p">,</span> <span class="n">out_shape</span><span class="p">),</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]),</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<div class="viewcode-block" id="Split"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Split.html#mindspore.ops.Split">[docs]</a><span class="k">class</span> <span class="nc">Split</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Splits the input tensor into output_num tensors along the given axis.</span>

<span class="sd">    The `input_x` tensor will be split into equally sized sub-tensors.</span>
<span class="sd">    This requires that `input_x.shape[axis]` is divisible by `output_num`.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): Index of the split position. Default: 0.</span>
<span class="sd">        output_num (int): The number of output tensors. Must be positive int. Default: 1.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        tuple[Tensor], the shape of each output tensor is the same, which is</span>
<span class="sd">        :math:`(y_1, y_2, ..., y_S)`. And the data type is the same with `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` or `output_num` is not an int.</span>
<span class="sd">        ValueError: If `axis` is out of the range [-len(`input_x.shape`), len(`input_x.shape`)),</span>
<span class="sd">            or if the `output_num` is less than or equal to 0.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; split = ops.Split(1, 2)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; print(x)</span>
<span class="sd">        [[1 1 1 1]</span>
<span class="sd">         [2 2 2 2]]</span>
<span class="sd">        &gt;&gt;&gt; output = split(x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[2, 2], dtype=Int32, value=</span>
<span class="sd">        [[1, 1],</span>
<span class="sd">         [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=</span>
<span class="sd">        [[1, 1],</span>
<span class="sd">         [2, 2]]))</span>
<span class="sd">        &gt;&gt;&gt; split = ops.Split(1, 4)</span>
<span class="sd">        &gt;&gt;&gt; output = split(x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[2, 1], dtype=Int32, value=</span>
<span class="sd">        [[1],</span>
<span class="sd">         [2]]), Tensor(shape=[2, 1], dtype=Int32, value=</span>
<span class="sd">        [[1],</span>
<span class="sd">         [2]]), Tensor(shape=[2, 1], dtype=Int32, value=</span>
<span class="sd">        [[1],</span>
<span class="sd">         [2]]), Tensor(shape=[2, 1], dtype=Int32, value=</span>
<span class="sd">        [[1],</span>
<span class="sd">         [2]]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">output_num</span><span class="o">=</span><span class="mi">1</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Split&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;output_num&quot;</span><span class="p">,</span> <span class="n">output_num</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">output_num</span><span class="p">,</span> <span class="s2">&quot;output_num&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">output_num</span> <span class="o">=</span> <span class="n">output_num</span>

    <span class="k">def</span> <span class="nf">__check__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">dim</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">dim</span><span class="p">,</span> <span class="n">dim</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s1">&#39;axis value&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">x_shape</span><span class="p">:</span>
            <span class="c1"># only validate when shape fully known</span>
            <span class="n">output_valid_check</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">]</span> <span class="o">%</span> <span class="bp">self</span><span class="o">.</span><span class="n">output_num</span>
            <span class="k">if</span> <span class="n">output_valid_check</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the specified axis of &#39;input_x&#39; should be divided exactly by &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;&#39;output_num&#39;, but got the shape of &#39;input_x&#39; in &#39;axis&#39; </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="si">}</span><span class="s2"> is &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">x_shape</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">]</span><span class="si">}</span><span class="s2">, &#39;output_num&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">output_num</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">size_splits</span> <span class="o">=</span> <span class="p">[</span><span class="n">x_shape</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">]</span> <span class="o">//</span> <span class="bp">self</span><span class="o">.</span><span class="n">output_num</span><span class="p">]</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">output_num</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;size_splits&#39;</span><span class="p">,</span> <span class="n">size_splits</span><span class="p">)</span></div>


<div class="viewcode-block" id="Rank"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Rank.html#mindspore.ops.Rank">[docs]</a><span class="k">class</span> <span class="nc">Rank</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the rank of a tensor.</span>

<span class="sd">    Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor</span>
<span class="sd">    is the number of indices required to uniquely select each element of the tensor.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; rank = ops.Rank()</span>
<span class="sd">        &gt;&gt;&gt; output = rank(input_tensor)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        2</span>
<span class="sd">        &gt;&gt;&gt; print(type(output))</span>
<span class="sd">        &lt;class &#39;int&#39;&gt;</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Rank&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<span class="k">class</span> <span class="nc">TruncatedNormal</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a tensor of the specified shape filled with truncated normal values.</span>

<span class="sd">    The generated values follow a normal distribution.</span>

<span class="sd">    Args:</span>
<span class="sd">        seed (int): An integer number used to create a random seed. Default: 0.</span>
<span class="sd">        dtype (:class:`mindspore.dtype`): Data type. Default: mindspore.float32.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **shape** (tuple[int]) - The shape of the output tensor, which is a tuple of positive integers.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the data type of output tensor is the same as attribute `dtype`.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; shape = (1, 2, 3)</span>
<span class="sd">        &gt;&gt;&gt; truncated_normal = ops.TruncatedNormal()</span>
<span class="sd">        &gt;&gt;&gt; output = truncated_normal(shape)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">seed</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize TruncatedNormal&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;seed&#39;</span><span class="p">,</span> <span class="n">seed</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_types_same_and_valid</span><span class="p">({</span><span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">dtype</span><span class="p">},</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">shape</span><span class="p">):</span>
        <span class="n">shape_value</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">shape_value</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">shape_value</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">value</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">th value of shape&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">shape_value</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<span class="k">class</span> <span class="nc">Size</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the size of a Tensor.</span>

<span class="sd">    Returns an int scalar representing the elements&#39; size of input, the total number of elements in the tensor.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        int. A scalar representing the elements&#39; size of `input_x`, that is, the total number of elements</span>
<span class="sd">        in the tensor, :math:`size=x_1*x_2*...x_R`. The data type is an int.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; size = ops.Size()</span>
<span class="sd">        &gt;&gt;&gt; output = size(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        4</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Size&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">size</span> <span class="o">=</span> <span class="mi">1</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">shp</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">shp</span><span class="p">:</span>
            <span class="n">size</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">size</span> <span class="o">=</span> <span class="n">functools</span><span class="o">.</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">*</span> <span class="n">y</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">size</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<span class="k">class</span> <span class="nc">Fill</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a tensor filled with a scalar value.</span>

<span class="sd">    Creates a tensor with the shape described by the second argument and fills it with the value given in the third argument.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.</span>
<span class="sd">        - **shape** (tuple) - The specified shape of output tensor. Only constant value is allowed.</span>
<span class="sd">        - **value** (scalar) - Value to fill the returned tensor. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the type specified by `type` and the shape specified by `shape`, filled with `value`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `shape` is not a tuple.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; fill = ops.Fill()</span>
<span class="sd">        &gt;&gt;&gt; output = fill(mindspore.float32, (2, 2), 1)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1.]</span>
<span class="sd">         [1. 1.]]</span>
<span class="sd">        &gt;&gt;&gt; output = fill(mindspore.float32, (3, 3), 0)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 0. 0.]</span>
<span class="sd">         [0. 0. 0.]</span>
<span class="sd">         [0. 0. 0.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Fill&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dtype</span><span class="p">,</span> <span class="n">dims</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">dims</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;value&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">numbers</span><span class="o">.</span><span class="n">Number</span><span class="p">,</span> <span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">item</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">dims</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">item</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;dims[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">]&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">valid_dtypes</span> <span class="o">=</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span>
                        <span class="n">mstype</span><span class="o">.</span><span class="n">uint8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">uint16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">uint32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">uint64</span><span class="p">,</span>
                        <span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float64</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">complex64</span><span class="p">,</span>
                        <span class="n">mstype</span><span class="o">.</span><span class="n">complex128</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_types_same_and_valid</span><span class="p">({</span><span class="s2">&quot;value&quot;</span><span class="p">:</span> <span class="n">dtype</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]},</span> <span class="n">valid_dtypes</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_nptype</span> <span class="o">=</span> <span class="n">mstype</span><span class="o">.</span><span class="n">dtype_to_nptype</span><span class="p">(</span><span class="n">dtype</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">])</span>
        <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">full</span><span class="p">(</span><span class="n">dims</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="n">x_nptype</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span>
            <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">ret</span><span class="p">),</span>
            <span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">dims</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span>
            <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
        <span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<div class="viewcode-block" id="Ones"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Ones.html#mindspore.ops.Ones">[docs]</a><span class="k">class</span> <span class="nc">Ones</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a tensor filled with value ones.</span>

<span class="sd">    Creates a tensor with shape described by the first argument and</span>
<span class="sd">    fills it with value ones in type of the second argument.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.</span>
<span class="sd">          Only constant positive int is allowed.</span>
<span class="sd">        - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same type and shape as input shape value.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `shape` is neither tuple nor int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; ones = ops.Ones()</span>
<span class="sd">        &gt;&gt;&gt; output = ones((2, 2), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1.]</span>
<span class="sd">         [1. 1.]]</span>
<span class="sd">        &gt;&gt;&gt; output = ones((3, 3), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1. 1.]</span>
<span class="sd">         [1. 1. 1.]</span>
<span class="sd">         [1. 1. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Ones&quot;&quot;&quot;</span></div>


<div class="viewcode-block" id="Zeros"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Zeros.html#mindspore.ops.Zeros">[docs]</a><span class="k">class</span> <span class="nc">Zeros</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a tensor filled with value zeros.</span>

<span class="sd">    Creates a tensor with shape described by the first argument and</span>
<span class="sd">    fills it with value zeros in type of the second argument.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.</span>
<span class="sd">          Only constant positive int is allowed.</span>
<span class="sd">        - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same type and shape as input shape value.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `shape` is neither int nor tuple.</span>
<span class="sd">        TypeError: If `shape` is a tuple whose elements are not all int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; zeros = ops.Zeros()</span>
<span class="sd">        &gt;&gt;&gt; output = zeros((2, 2), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 0.]</span>
<span class="sd">         [0. 0.]]</span>

<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Zeros&quot;&quot;&quot;</span></div>


<span class="k">class</span> <span class="nc">OnesLike</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a new tensor. The values of all elements are 1.</span>

<span class="sd">    Returns a tensor of ones with the same shape and type as the input.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - Input tensor.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x` but filled with ones.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; oneslike = ops.OnesLike()</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; output = oneslike(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1 1]</span>
<span class="sd">         [1 1]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize OnesLike&quot;&quot;&quot;</span>


<span class="k">class</span> <span class="nc">ZerosLike</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a new tensor. All elements value are 0.</span>

<span class="sd">    Returns a tensor of zeros with the same shape and data type as the input tensor.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - Input tensor. The data type is int32, int64, float16 or float32.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and data type as `input_x` but filled with zeros.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; zeroslike = ops.ZerosLike()</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; output = zeroslike(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 0.]</span>
<span class="sd">         [0. 0.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ZerosLike&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>


<div class="viewcode-block" id="TupleToArray"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.TupleToArray.html#mindspore.ops.TupleToArray">[docs]</a><span class="k">class</span> <span class="nc">TupleToArray</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Converts a tuple to a tensor.</span>

<span class="sd">    If the type of the first number in the tuple is integer, the data type of the output tensor is int.</span>
<span class="sd">    Otherwise, the data type of the output tensor is float.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (tuple) - A tuple of numbers. These numbers have the same type. Only constant value is allowed.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a tuple.</span>
<span class="sd">        ValueError: If length of `input_x` is less than or equal to 0.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = (1,2,3)</span>
<span class="sd">        &gt;&gt;&gt; print(type(input_x))</span>
<span class="sd">        &lt;class &#39;tuple&#39;&gt;</span>
<span class="sd">        &gt;&gt;&gt; output = ops.TupleToArray()(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(type(output))</span>
<span class="sd">        &lt;class &#39;mindspore.common.tensor.Tensor&#39;&gt;</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [1 2 3]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize TupleToArray&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;size of x&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GT</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">dtype</span> <span class="o">=</span> <span class="nb">type</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">item</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">x</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;x[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&quot;</span><span class="p">,</span> <span class="n">item</span><span class="p">,</span> <span class="p">[</span><span class="n">numbers</span><span class="o">.</span><span class="n">Number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">all</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">item</span><span class="p">,</span> <span class="n">dtype</span><span class="p">)</span> <span class="k">for</span> <span class="n">item</span> <span class="ow">in</span> <span class="n">x</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s2">, all elements of &#39;input_x&#39; must be have same type.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">ret</span><span class="p">)</span>

    <span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">args</span> <span class="o">=</span> <span class="nb">list</span><span class="p">()</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">range</span><span class="p">):</span>
            <span class="n">args</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">args</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">_run_op</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">args</span><span class="p">)</span></div>


<div class="viewcode-block" id="ScalarToArray"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScalarToArray.html#mindspore.ops.ScalarToArray">[docs]</a><span class="k">class</span> <span class="nc">ScalarToArray</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Converts a scalar to a `Tensor`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor. 0-D Tensor and the content is the input.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is neither int nor float.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScalarToArray()</span>
<span class="sd">        &gt;&gt;&gt; input_x = 1.0</span>
<span class="sd">        &gt;&gt;&gt; print(type(input_x))</span>
<span class="sd">        &lt;class &#39;float&#39;&gt;</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(type(output))</span>
<span class="sd">        &lt;class &#39;mindspore.common.tensor.Tensor&#39;&gt;</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        1.0</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">pass</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">float</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">ret</span><span class="p">)</span></div>


<div class="viewcode-block" id="ScalarToTensor"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScalarToTensor.html#mindspore.ops.ScalarToTensor">[docs]</a><span class="k">class</span> <span class="nc">ScalarToTensor</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Converts a scalar to a `Tensor`, and converts the data type to the specified type.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.</span>
<span class="sd">        - **dtype** (mindspore.dtype) - The target data type. Default: mindspore.float32. Only</span>
<span class="sd">          constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor. 0-D Tensor and the content is the input.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is neither int nor float.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScalarToTensor()</span>
<span class="sd">        &gt;&gt;&gt; data = 1</span>
<span class="sd">        &gt;&gt;&gt; output = op(data, mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        1.0</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="c1"># No configurable attributes; registration is handled by prim_attr_register.</span>
        <span class="k">pass</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">):</span>
        <span class="c1"># Constant-fold the conversion: validate the scalar and dtype, then build a 0-D Tensor.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">float</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;dtype&quot;</span><span class="p">,</span> <span class="n">dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># Translate the mindspore dtype into its numpy equivalent for np.array construction.</span>
        <span class="n">data_type</span> <span class="o">=</span> <span class="n">mstype</span><span class="o">.</span><span class="n">dtype_to_nptype</span><span class="p">(</span><span class="n">dtype</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">data_type</span><span class="p">))</span></div>


<div class="viewcode-block" id="InvertPermutation"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.InvertPermutation.html#mindspore.ops.InvertPermutation">[docs]</a><span class="k">class</span> <span class="nc">InvertPermutation</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the inverse of an index permutation.</span>

<span class="sd">    This operator is mainly used to calculate the inverse of index permutation.</span>
<span class="sd">    It requires a 1-dimensional integer tensor x, which represents the index of a zero-based array,</span>
<span class="sd">    and exchanges each value with its index position. In other words, For output tensor y and input tensor x,</span>
<span class="sd">    this operation calculates the following values:</span>

<span class="sd">    :math:`y[x[i]] = i, \quad i \in [0, 1, \ldots, \text{len}(x)-1]`.</span>

<span class="sd">    Note:</span>
<span class="sd">        These values must include 0. There must be no duplicate values and the</span>
<span class="sd">        values can not be negative.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Union(tuple[int], list[int])) - The input is constructed by multiple</span>
<span class="sd">          integers, i.e., :math:`(y_1, y_2, ..., y_S)` representing the indices.</span>
<span class="sd">          The values must include 0. There can be no duplicate values or negative values.</span>
<span class="sd">          Only constant value is allowed. The maximum value must be equal to the length of input_x minus one.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        tuple[int]. It has the same length as the input.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is neither tuple nor list.</span>
<span class="sd">        TypeError: If element of `input_x` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; invert = ops.InvertPermutation()</span>
<span class="sd">        &gt;&gt;&gt; input_data = (3, 4, 0, 2, 1)</span>
<span class="sd">        &gt;&gt;&gt; output = invert(input_data)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (2, 4, 3, 0, 1)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize InvertPermutation&quot;&quot;&quot;</span>
        <span class="c1"># Mark as a const primitive: the result is evaluated from constant inputs during inference.</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">set_const_prim</span><span class="p">(</span><span class="kc">True</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="c1"># Validate the constant index tuple/list and compute its inverse permutation.</span>
        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">x_value</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="n">mstype</span><span class="o">.</span><span class="n">issubclass_</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s2">, the value of &#39;input_x&#39; must be non-Tensor, but got </span><span class="si">{</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">x_value</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the value of &#39;input_x&#39; can not be None, but got </span><span class="si">{</span><span class="n">x_value</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">x_shp</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># NOTE(review): any truthy entry in &#39;shape&#39; is rejected — x is expected to be a flat</span>
        <span class="c1"># tuple/list of scalar ints; confirm against the abstract-value layout used by callers.</span>
        <span class="k">for</span> <span class="n">shp</span> <span class="ow">in</span> <span class="n">x_shp</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">shp</span><span class="p">:</span>
                <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x_value</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">int64</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s2">, the dimension of &#39;input_x&#39; must be 1, but got </span><span class="si">{</span><span class="n">x_rank</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">x_value</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;input[</span><span class="si">%d</span><span class="s2">]&quot;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># Sort a copy: unique values spanning 0..len-1 sort to exactly their own indices.</span>
        <span class="n">z</span> <span class="o">=</span> <span class="p">[</span><span class="n">x_value</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_value</span><span class="p">))]</span>
        <span class="n">z</span><span class="o">.</span><span class="n">sort</span><span class="p">()</span>

        <span class="c1"># Reject duplicates (adjacent equal values after sorting).</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">z</span><span class="p">)):</span>
            <span class="k">if</span> <span class="n">z</span><span class="p">[</span><span class="n">i</span> <span class="o">-</span> <span class="mi">1</span><span class="p">]</span> <span class="o">==</span> <span class="n">z</span><span class="p">[</span><span class="n">i</span><span class="p">]:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the &#39;input_x&#39; can not contain duplicate values, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but got duplicated </span><span class="si">{</span><span class="n">z</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="si">}</span><span class="s2"> in the &#39;input_x&#39;.&quot;</span><span class="p">)</span>
        <span class="c1"># min must be 0 and max must be len-1, so the values form a permutation of 0..len-1.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;value min&#39;</span><span class="p">,</span> <span class="nb">min</span><span class="p">(</span><span class="n">x_value</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;value max&#39;</span><span class="p">,</span> <span class="nb">max</span><span class="p">(</span><span class="n">x_value</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_value</span><span class="p">)</span> <span class="o">-</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="c1"># Build the inverse permutation: y[x[i]] = i.</span>
        <span class="n">y</span> <span class="o">=</span> <span class="p">[</span><span class="kc">None</span><span class="p">]</span> <span class="o">*</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_value</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">x_value</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;input[</span><span class="si">%d</span><span class="s2">]&quot;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;value&#39;</span><span class="p">,</span> <span class="n">z</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="sa">f</span><span class="s1">&#39;index&#39;</span><span class="p">,</span> <span class="n">i</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">y</span><span class="p">[</span><span class="n">value</span><span class="p">]</span> <span class="o">=</span> <span class="n">i</span>
        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">x_shp</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">y</span><span class="p">)}</span></div>


<span class="k">class</span> <span class="nc">Argmax</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the indices of the maximum value of a tensor across the axis.</span>

<span class="sd">    If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor will be</span>
<span class="sd">    :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): Axis where the Argmax operation applies to. Default: -1.</span>
<span class="sd">        output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.</span>
<span class="sd">            Default: `mindspore.dtype.int32`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - Input tensor. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">          Support data type list as follows:</span>

<span class="sd">          - Ascend: Float16, Float32.</span>
<span class="sd">          - GPU: Float16, Float32.</span>
<span class="sd">          - CPU: Float16, Float32, Float64.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, indices of the max value of input tensor across the axis.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>
<span class="sd">        TypeError: If `output_type` is neither int32 nor int64.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Argmax(output_type=mindspore.int32)(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [1 0 0]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=-</span><span class="mi">1</span><span class="p">,</span> <span class="n">output_type</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Argmax&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># Accept int32 or int64, matching the documented Raises clause and the Argmin primitive.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_type_name</span><span class="p">(</span><span class="s2">&quot;output_type&quot;</span><span class="p">,</span> <span class="n">output_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;output_type&#39;</span><span class="p">,</span> <span class="n">output_type</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="c1"># Output shape is the input shape with the reduced axis removed.</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span>
        <span class="c1"># Treat a None axis as 0 (defensive; the constructor always sets an int).</span>
        <span class="k">if</span> <span class="n">axis</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">axis</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">x_rank</span><span class="p">,</span> <span class="n">x_rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># Normalize a negative axis into [0, x_rank).</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span> <span class="o">+</span> <span class="n">x_rank</span> <span class="k">if</span> <span class="n">axis</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="k">else</span> <span class="n">axis</span>
        <span class="n">output_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">x_rank</span><span class="p">)</span> <span class="k">if</span> <span class="n">i</span> <span class="o">!=</span> <span class="n">axis</span><span class="p">]</span>
        <span class="k">return</span> <span class="n">output_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="c1"># Floating-point inputs only; the result dtype is the configured output_type.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float64</span><span class="p">],</span>
                                           <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">output_type</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">Argmin</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns the indices of the minimum value of a tensor across the axis.</span>

<span class="sd">    If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is</span>
<span class="sd">    :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): Axis where the Argmin operation applies to. Default: -1.</span>
<span class="sd">        output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.</span>
<span class="sd">            Default: `mindspore.dtype.int32`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - Input tensor.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, indices of the min value of input tensor across the axis.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>
<span class="sd">        TypeError: If `output_type` is neither int32 nor int64.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; index = ops.Argmin()(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(index)</span>
<span class="sd">        2</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=-</span><span class="mi">1</span><span class="p">,</span> <span class="n">output_type</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Argmin&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># output_type may be int32 or int64, matching the documented Raises clause.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_type_name</span><span class="p">(</span><span class="s2">&quot;output_type&quot;</span><span class="p">,</span> <span class="n">output_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;output_type&#39;</span><span class="p">,</span> <span class="n">output_type</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="c1"># Output shape is the input shape with the reduced axis removed.</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span>
        <span class="c1"># Treat a None axis as 0 (defensive; the constructor always sets an int).</span>
        <span class="k">if</span> <span class="n">axis</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">axis</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">x_rank</span><span class="p">,</span> <span class="n">x_rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="c1"># Normalize a negative axis into [0, x_rank).</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span> <span class="o">+</span> <span class="n">x_rank</span> <span class="k">if</span> <span class="n">axis</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="k">else</span> <span class="n">axis</span>
        <span class="c1"># (local name &#39;ouput_shape&#39; is a long-standing typo for output_shape; kept as-is)</span>
        <span class="n">ouput_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">x_rank</span><span class="p">)</span> <span class="k">if</span> <span class="n">i</span> <span class="o">!=</span> <span class="n">axis</span><span class="p">]</span>
        <span class="k">return</span> <span class="n">ouput_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="c1"># Input is only checked to be a tensor; the result dtype is the configured output_type.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">output_type</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">ArgMaxWithValue</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Calculates the maximum value with the corresponding index.</span>

<span class="sd">    Calculates the maximum value along with the given axis for the input tensor. It returns the maximum values and</span>
<span class="sd">    indices.</span>

<span class="sd">    Note:</span>
<span class="sd">        In auto_parallel and semi_auto_parallel mode, the first output index can not be used.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        - If there are multiple maximum values, the index of the first maximum value is used.</span>
<span class="sd">        - The value range of &quot;axis&quot; is [-dims, dims - 1]. &quot;dims&quot; is the dimension length of &quot;input_x&quot;.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): The dimension to reduce. Default: 0.</span>
<span class="sd">        keep_dims (bool): Whether to reduce dimension, if true, the output will keep the same dimension as the input,</span>
<span class="sd">                          the output will reduce dimension if false. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as</span>
<span class="sd">          :math:`(x_1, x_2, ..., x_N)`. And the data type only support mindspore.float16 or float32.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input</span>
<span class="sd">        tensor.</span>
<span class="sd">        - index (Tensor) - The index for the maximum value of the input tensor. If `keep_dims` is true, the shape of</span>
<span class="sd">        output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is</span>
<span class="sd">        :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.</span>
<span class="sd">        - output_x (Tensor) - The maximum value of input tensor, with the same shape as index.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `keep_dims` is not a bool.</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; index, output = ops.ArgMaxWithValue()(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(index, output)</span>
<span class="sd">        3 0.7</span>
<span class="sd">        &gt;&gt;&gt; index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(index, output)</span>
<span class="sd">        [3] [0.7]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">keep_dims</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ArgMaxWithValue&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">keep_dims</span> <span class="o">=</span> <span class="n">keep_dims</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;keep_dims&#39;</span><span class="p">,</span> <span class="n">keep_dims</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">x_rank</span><span class="p">,</span> <span class="n">x_rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">ouput_shape</span> <span class="o">=</span> <span class="n">_infer_shape_reduce</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">keep_dims</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">ouput_shape</span><span class="p">,</span> <span class="n">ouput_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span> <span class="n">x_dtype</span>


<span class="k">class</span> <span class="nc">ArgMinWithValue</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Calculates the minimum value with the corresponding index, and returns indices and values.</span>

<span class="sd">    Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and</span>
<span class="sd">    indices.</span>

<span class="sd">    Note:</span>
<span class="sd">        In auto_parallel and semi_auto_parallel mode, the first output index can not be used.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        - If there are multiple minimum values, the index of the first minimum value is used.</span>
<span class="sd">        - The value range of &quot;axis&quot; is [-dims, dims - 1]. &quot;dims&quot; is the dimension length of &quot;input_x&quot;.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): The dimension to reduce. Default: 0.</span>
<span class="sd">        keep_dims (bool): Whether to reduce dimension, if true, the output will keep the same dimension as the input,</span>
<span class="sd">                          the output will reduce dimension if false. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as</span>
<span class="sd">          :math:`(x_1, x_2, ..., x_N)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input</span>
<span class="sd">        tensor.</span>
<span class="sd">        - index (Tensor) - The index for the minimum value of the input tensor. If `keep_dims` is true, the shape of</span>
<span class="sd">        output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is</span>
<span class="sd">        :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.</span>
<span class="sd">        - output_x (Tensor) - The minimum value of input tensor, with the same shape as index.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `keep_dims` is not a bool.</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.ArgMinWithValue()(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Float32, value= 0))</span>
<span class="sd">        &gt;&gt;&gt; output = ops.ArgMinWithValue(keep_dims=True)(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[1], dtype=Int32, value= [0]), Tensor(shape=[1], dtype=Float32, value= [ 0.00000000e+00]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">keep_dims</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ArgMinWithValue&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">keep_dims</span> <span class="o">=</span> <span class="n">keep_dims</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;keep_dims&#39;</span><span class="p">,</span> <span class="n">keep_dims</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">x_rank</span><span class="p">,</span> <span class="n">x_rank</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">ouput_shape</span> <span class="o">=</span> <span class="n">_infer_shape_reduce</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">keep_dims</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">ouput_shape</span><span class="p">,</span> <span class="n">ouput_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span> <span class="n">x_dtype</span>


<span class="k">class</span> <span class="nc">Tile</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Replicates a tensor with given multiples times.</span>

<span class="sd">    Creates a new tensor by replicating `input_x` `multiples` times. The i&#39;th dimension of</span>
<span class="sd">    output tensor has `input_x.shape(i) * multiples[i]` elements, and the values of `input_x`</span>
<span class="sd">    are replicated `multiples[i]` times along the i&#39;th dimension.</span>

<span class="sd">    Note:</span>
<span class="sd">        The length of `multiples` must be greater than or equal to the length of dimension in `input_x`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - 1-D or higher Tensor. Set the shape of input tensor as</span>
<span class="sd">          :math:`(x_1, x_2, ..., x_S)`.</span>

<span class="sd">        - **multiples** (tuple[int]) - The input tuple is constructed by multiple</span>
<span class="sd">          integers, i.e., :math:`(y_1, y_2, ..., y_S)`. The length of `multiples`</span>
<span class="sd">          cannot be smaller than the length of the shape of `input_x`.</span>
<span class="sd">          Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same data type as the `input_x`.</span>

<span class="sd">        - If the length of `multiples` is the same as the length of shape of `input_x`,</span>
<span class="sd">          then the shape of their corresponding positions can be multiplied, and</span>
<span class="sd">          the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_S)`.</span>
<span class="sd">        - If the length of `multiples` is larger than the length of shape of `input_x`,</span>
<span class="sd">          fill in multiple 1 in the length of the shape of `input_x` until their lengths are consistent.</span>
<span class="sd">          Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,</span>
<span class="sd">          then the shape of their corresponding positions can be multiplied, and</span>
<span class="sd">          the shape of Outputs is :math:`(1*y_1, ..., x_S*y_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `multiples` is not a tuple or its elements are not all int.</span>
<span class="sd">        ValueError: If the elements of `multiples` are not all greater than 0.</span>
<span class="sd">        ValueError: If the length of `multiples` is smaller than the length of dimension in `input_x`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; tile = ops.Tile()</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; multiples = (2, 3)</span>
<span class="sd">        &gt;&gt;&gt; output = tile(input_x, multiples)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1.  2.  1.  2.  1.  2.]</span>
<span class="sd">         [3.  4.  3.  4.  3.  4.]</span>
<span class="sd">         [1.  2.  1.  2.  1.  2.]</span>
<span class="sd">         [3.  4.  3.  4.  3.  4.]]</span>
<span class="sd">        &gt;&gt;&gt; multiples = (2, 3, 2)</span>
<span class="sd">        &gt;&gt;&gt; output = tile(input_x, multiples)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[1. 2. 1. 2.]</span>
<span class="sd">          [3. 4. 3. 4.]</span>
<span class="sd">          [1. 2. 1. 2.]</span>
<span class="sd">          [3. 4. 3. 4.]</span>
<span class="sd">          [1. 2. 1. 2.]</span>
<span class="sd">          [3. 4. 3. 4.]]</span>
<span class="sd">         [[1. 2. 1. 2.]</span>
<span class="sd">          [3. 4. 3. 4.]</span>
<span class="sd">          [1. 2. 1. 2.]</span>
<span class="sd">          [3. 4. 3. 4.]</span>
<span class="sd">          [1. 2. 1. 2.]</span>
<span class="sd">          [3. 4. 3. 4.]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Tile&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;multiples&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">check_elim</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">base_tensor</span><span class="p">,</span> <span class="n">multiplier</span><span class="p">):</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">base_tensor</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;input_x&#39; should be Tensor, &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">base_tensor</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">all</span><span class="p">(</span><span class="n">v</span> <span class="o">==</span> <span class="mi">1</span> <span class="k">for</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">multiplier</span><span class="p">)</span> <span class="ow">and</span> <span class="nb">len</span><span class="p">(</span><span class="n">base_tensor</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="o">&gt;=</span> <span class="nb">len</span><span class="p">(</span><span class="n">multiplier</span><span class="p">):</span>
            <span class="k">return</span> <span class="p">(</span><span class="kc">True</span><span class="p">,</span> <span class="n">base_tensor</span><span class="p">)</span>
        <span class="k">return</span> <span class="p">(</span><span class="kc">False</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">multiples</span><span class="p">):</span>
        <span class="n">multiples_v</span> <span class="o">=</span> <span class="n">multiples</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="n">multiples_v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">multiples</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span> <span class="o">!=</span> <span class="mi">1</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s1"> the dim of multiples must be 1.&#39;</span><span class="p">)</span>
            <span class="n">rank</span> <span class="o">=</span> <span class="nb">max</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]),</span> <span class="n">multiples</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">])</span>
            <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">rank</span>
            <span class="c1"># tile can&#39;t infer min/max shape if multiples_v is None</span>
            <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shape</span><span class="p">,</span>
                    <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                    <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
                    <span class="s1">&#39;min_shape&#39;</span><span class="p">:</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">rank</span><span class="p">,</span>
                    <span class="s1">&#39;max_shape&#39;</span><span class="p">:</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">rank</span>
                    <span class="p">}</span>

        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span>
            <span class="s2">&quot;multiples&quot;</span><span class="p">,</span> <span class="n">multiples_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">multiple</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">multiples_v</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span>
                <span class="n">multiple</span><span class="p">,</span> <span class="s2">&quot;multiples[</span><span class="si">%d</span><span class="s2">]&quot;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span>
            <span class="s2">&quot;x[</span><span class="se">\&#39;</span><span class="s2">dtype</span><span class="se">\&#39;</span><span class="s2">]&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s2">&quot;dtype&quot;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">len_sub</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">multiples_v</span><span class="p">)</span> <span class="o">-</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">)</span>
        <span class="n">multiples_w</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">len_sub</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
            <span class="n">multiples_w</span> <span class="o">=</span> <span class="n">multiples_v</span>
        <span class="k">if</span> <span class="n">len_sub</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">len_sub</span><span class="p">):</span>
                <span class="n">x_shp</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
            <span class="n">multiples_w</span> <span class="o">=</span> <span class="n">multiples_v</span>
        <span class="k">elif</span> <span class="n">len_sub</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the length of &#39;multiples&#39; can not be smaller than &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got length of &#39;multiples&#39;: </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">multiples_v</span><span class="p">)</span><span class="si">}</span><span class="s2"> &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;and dimension of &#39;input_x&#39;: </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">a</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">multiples_w</span><span class="p">):</span>
            <span class="n">x_shp</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="n">a</span>
        <span class="n">value</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">tile</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">(),</span> <span class="n">multiples_w</span><span class="p">))</span>
        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">x_shp</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>


<div class="viewcode-block" id="UnsortedSegmentSum"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.UnsortedSegmentSum.html#mindspore.ops.UnsortedSegmentSum">[docs]</a><span class="k">class</span> <span class="nc">UnsortedSegmentSum</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the sum of a tensor along segments.</span>

<span class="sd">    Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where</span>
<span class="sd">    :math:`j` is a tuple describing the index of element in data.  `segment_ids` selects which elements in data to sum</span>
<span class="sd">    up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value</span>
<span class="sd">    range.</span>

<span class="sd">    The following figure shows the calculation process of UnsortedSegmentSum:</span>

<span class="sd">    .. image:: api_img/UnsortedSegmentSum.png</span>

<span class="sd">    Note:</span>
<span class="sd">        - If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.</span>
<span class="sd">        - On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an</span>
<span class="sd">          execution error will occur.</span>

<span class="sd">    If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids</span>
<span class="sd">    is negative, the value will be ignored. &#39;num_segments&#39; must be equal to the number of different segment_ids.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **segment_ids** (Tensor) - Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 &lt; N &lt;= R.</span>
<span class="sd">        - **num_segments** (int) - Set :math:`z` as num_segments.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `num_segments` is not an int.</span>
<span class="sd">        ValueError: If length of shape of `segment_ids` is less than 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor([1, 2, 3, 4], mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 4</span>
<span class="sd">        &gt;&gt;&gt; output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [3. 3. 4. 0.]</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 6</span>
<span class="sd">        &gt;&gt;&gt; output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [3. 3. 4. 2. 5. 0.]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize UnsortedSegmentSum&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;segment_ids&#39;</span><span class="p">,</span> <span class="s1">&#39;num_segments&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">segment_ids</span><span class="p">,</span> <span class="n">num_segments</span><span class="p">):</span>
        <span class="n">x_type</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;x_shape&quot;</span><span class="p">,</span> <span class="n">x_shp</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shp_len</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">x_shp_len</span><span class="p">,</span> <span class="s2">&quot;rank of input_x&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">segment_ids_shp</span> <span class="o">=</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">segment_ids_type</span> <span class="o">=</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;segment_ids&quot;</span><span class="p">,</span> <span class="n">segment_ids_type</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;segment_ids&quot;</span><span class="p">,</span> <span class="n">segment_ids_shp</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">segment_ids_shp_len</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">segment_ids_shp</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">segment_ids_shp_len</span><span class="p">,</span> <span class="s2">&quot;rank of segment_ids&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;rank of input_x&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">),</span>
                        <span class="s1">&#39;rank of segments_id&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">segment_ids_shp</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">x_shp</span> <span class="ow">and</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">segment_ids_shp</span><span class="p">:</span>
            <span class="c1"># only validate when both shapes fully known</span>
            <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">segment_ids_shp</span><span class="p">):</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;ids[</span><span class="si">%d</span><span class="s2">]&quot;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="s1">&#39;input[</span><span class="si">%d</span><span class="s1">]&#39;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">x_shp</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">num_segments_v</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">num_segments_type</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="n">num_segments_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">num_segments_type</span><span class="p">,</span> <span class="nb">type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">)):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="n">num_segments_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span>
                                               <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">shp</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;num_segments&#39;</span><span class="p">,</span> <span class="n">num_segments_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">num_segments_v</span><span class="p">,</span> <span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">shp</span> <span class="o">=</span> <span class="p">[</span><span class="n">num_segments_v</span><span class="p">]</span>

        <span class="n">shp</span> <span class="o">+=</span> <span class="n">x_shp</span><span class="p">[</span><span class="n">segment_ids_shp_len</span><span class="p">:]</span>
        <span class="k">if</span> <span class="s2">&quot;max_value&quot;</span> <span class="ow">in</span> <span class="n">num_segments</span> <span class="ow">and</span> <span class="s2">&quot;min_value&quot;</span> <span class="ow">in</span> <span class="n">num_segments</span><span class="p">:</span>
            <span class="n">output_max_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;max_value&#39;</span><span class="p">])</span>
            <span class="n">output_min_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;min_value&#39;</span><span class="p">])</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">num_segments_type</span><span class="p">,</span> <span class="nb">type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">)):</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dtype of &#39;num_segments&#39; only support int type &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;when it is not a dynamic value, but got type of &#39;num_segments&#39;: &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="n">num_segments_type</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
            <span class="n">output_max_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">num_segments_v</span><span class="p">]</span>
            <span class="n">output_min_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">num_segments_v</span><span class="p">]</span>
        <span class="k">if</span> <span class="s1">&#39;max_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span> <span class="ow">and</span> <span class="s1">&#39;min_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="n">max_output_incoming</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span>
            <span class="n">min_output_incoming</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">max_output_incoming</span> <span class="o">=</span> <span class="n">x_shp</span>
            <span class="n">min_output_incoming</span> <span class="o">=</span> <span class="n">x_shp</span>
        <span class="n">output_max_shape</span> <span class="o">+=</span> <span class="n">max_output_incoming</span><span class="p">[</span><span class="n">segment_ids_shp_len</span><span class="p">:]</span>
        <span class="n">output_min_shape</span> <span class="o">+=</span> <span class="n">min_output_incoming</span><span class="p">[</span><span class="n">segment_ids_shp_len</span><span class="p">:]</span>
        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">shp</span><span class="p">,</span>
                <span class="s1">&#39;max_shape&#39;</span><span class="p">:</span> <span class="n">output_max_shape</span><span class="p">,</span>
                <span class="s1">&#39;min_shape&#39;</span><span class="p">:</span> <span class="n">output_min_shape</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">x_type</span><span class="o">.</span><span class="n">element_type</span><span class="p">()),</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span></div>


<div class="viewcode-block" id="UnsortedSegmentMin"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.UnsortedSegmentMin.html#mindspore.ops.UnsortedSegmentMin">[docs]</a><span class="k">class</span> <span class="nc">UnsortedSegmentMin</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the minimum of a tensor along segments.</span>

<span class="sd">    The following figure shows the calculation process of UnsortedSegmentMin:</span>

<span class="sd">    .. image:: api_img/UnsortedSegmentMin.png</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text { output }_i=\text{min}_{j \ldots} \text { data }[j \ldots]</span>

<span class="sd">    where :math:`min` over tuples :math:`j...` such that :math:`segment_ids[j...] == i`.</span>

<span class="sd">    Note:</span>
<span class="sd">        If the segment_id i is absent in the segment_ids, then output[i] will be filled with</span>
<span class="sd">        the maximum value of the input_x&#39;s type.</span>
<span class="sd">        The `segment_ids` must be non-negative tensor.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          The data type must be float16, float32 or int32.</span>
<span class="sd">        - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.</span>
<span class="sd">          The data type must be int32.</span>
<span class="sd">        - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `num_segments` is not an int.</span>
<span class="sd">        ValueError: If length of shape of `segment_ids` is not equal to 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 2</span>
<span class="sd">        &gt;&gt;&gt; unsorted_segment_min = ops.UnsortedSegmentMin()</span>
<span class="sd">        &gt;&gt;&gt; output = unsorted_segment_min(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 2. 3.]</span>
<span class="sd">         [4. 2. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize UnsortedSegmentMin&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;segment_ids&#39;</span><span class="p">,</span> <span class="s1">&#39;num_segments&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__check__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">segment_ids</span><span class="p">,</span> <span class="n">num_segments</span><span class="p">):</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">segment_ids_shape</span> <span class="o">=</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">valid_type</span> <span class="o">=</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">valid_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;segment_ids&quot;</span><span class="p">,</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">segment_ids_shape</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="s2">&quot;rank of segment_ids_shape&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">num_segments_type</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="n">num_segments_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">x_shape</span> <span class="ow">and</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">segment_ids_shape</span><span class="p">:</span>
            <span class="c1"># only validate when both shapes fully known</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;first shape of input_x&#39;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
                            <span class="s1">&#39;length of segments_id&#39;</span><span class="p">,</span> <span class="n">segment_ids_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">num_segments_v</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;num_segments&#39;</span><span class="p">,</span> <span class="n">num_segments_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">num_segments_v</span><span class="p">,</span> <span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span></div>


<div class="viewcode-block" id="UnsortedSegmentMax"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.UnsortedSegmentMax.html#mindspore.ops.UnsortedSegmentMax">[docs]</a><span class="k">class</span> <span class="nc">UnsortedSegmentMax</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the maximum along segments of a tensor.</span>

<span class="sd">    The following figure shows the calculation process of UnsortedSegmentMax:</span>

<span class="sd">    .. image:: api_img/UnsortedSegmentMax.png</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text { output }_i=\text{max}_{j \ldots} \text { data }[j \ldots]</span>

<span class="sd">    where :math:`max` over tuples :math:`j...` such that :math:`segment\_ids[j...] == i`.</span>

<span class="sd">    Note:</span>
<span class="sd">        If the segment_id i is absent in the segment_ids, then output[i] will be filled with</span>
<span class="sd">        the minimum value of the input_x&#39;s type.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          The data type must be float16, float32 or int32.</span>
<span class="sd">        - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.</span>
<span class="sd">          The data type must be int32.</span>
<span class="sd">        - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `num_segments` is not an int.</span>
<span class="sd">        ValueError: If length of shape of `segment_ids` is not equal to 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; # case 1: Only have two num_segments, where is 0 and 1, and segment_ids=[0, 1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # num_segments = 2 indicates that there are two types of segment_id,</span>
<span class="sd">        &gt;&gt;&gt; # the first number &#39;0&#39; in [0, 1, 1] indicates input_x[0],</span>
<span class="sd">        &gt;&gt;&gt; # the second number &#39;1&#39; in [0, 1, 1] indicates input_x[1],</span>
<span class="sd">        &gt;&gt;&gt; # the third number &#39;1&#39; in [0, 1, 1] indicates input_x[2],</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0], which is [1, 2, 3] will not be compared to other segment_id.</span>
<span class="sd">        &gt;&gt;&gt; # Only the same segment_id will be compared.</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import ops</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 2</span>
<span class="sd">        &gt;&gt;&gt; unsorted_segment_max = ops.UnsortedSegmentMax()</span>
<span class="sd">        &gt;&gt;&gt; output = unsorted_segment_max(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 2. 3.]</span>
<span class="sd">         [4. 5. 6.]]</span>
<span class="sd">        &gt;&gt;&gt;</span>
<span class="sd">        &gt;&gt;&gt; # case 2: The segment_ids=[0, 0, 1, 1].</span>
<span class="sd">        &gt;&gt;&gt; # [1, 2, 3] will compare with [4, 2, 0],</span>
<span class="sd">        &gt;&gt;&gt; # and [4, 5, 6] will compare with [4, 2, 1].</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor(np.array([0, 0, 1, 1]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 2</span>
<span class="sd">        &gt;&gt;&gt; unsorted_segment_max = ops.UnsortedSegmentMax()</span>
<span class="sd">        &gt;&gt;&gt; output = unsorted_segment_max(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(input_x.shape)</span>
<span class="sd">            (4, 3)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">            [[4. 2. 3.]</span>
<span class="sd">             [4. 5. 6.]]</span>
<span class="sd">        &gt;&gt;&gt; # case 3: If the input_x have three dimensions even more, what will happen?</span>
<span class="sd">        &gt;&gt;&gt; # The shape of input_x is (2, 4, 3),</span>
<span class="sd">        &gt;&gt;&gt; # and the length of segment_ids should be the same as the first dimension of input_x.</span>
<span class="sd">        &gt;&gt;&gt; # Because the segment_ids are different, input_x[0] will not be compared to input_x[1].</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]],</span>
<span class="sd">        ...                            [[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor(np.array([0, 1]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 2</span>
<span class="sd">        &gt;&gt;&gt; unsorted_segment_max = ops.UnsortedSegmentMax()</span>
<span class="sd">        &gt;&gt;&gt; output = unsorted_segment_max(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(input_x.shape)</span>
<span class="sd">            (2, 4, 3)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">            [[[1. 2. 3.]</span>
<span class="sd">              [4. 2. 0.]</span>
<span class="sd">              [4. 5. 6.]</span>
<span class="sd">              [4. 2. 1.]]</span>
<span class="sd">             [[1. 2. 3.]</span>
<span class="sd">              [4. 2. 0.]</span>
<span class="sd">              [4. 5. 6.]</span>
<span class="sd">              [4. 2. 1.]]]</span>
<span class="sd">        &gt;&gt;&gt; # case 4: It has the same input with the 3rd case.</span>
<span class="sd">        &gt;&gt;&gt; # Because num_segments is equal to 2, there are two segment_ids, but currently only one 0 is used.</span>
<span class="sd">        &gt;&gt;&gt; # the segment_id i is absent in the segment_ids, then output[i] will be filled with</span>
<span class="sd">        &gt;&gt;&gt; # the smallest possible value of the input_x&#39;s type.</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor(np.array([0, 0]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; output = unsorted_segment_max(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">            [[[ 1.0000000e+00  2.0000000e+00  3.0000000e+00]</span>
<span class="sd">              [ 4.0000000e+00  2.0000000e+00  0.0000000e+00]</span>
<span class="sd">              [ 4.0000000e+00  5.0000000e+00  6.0000000e+00]</span>
<span class="sd">              [ 4.0000000e+00  2.0000000e+00  1.0000000e+00]]</span>
<span class="sd">             [[-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]</span>
<span class="sd">              [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]</span>
<span class="sd">              [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]</span>
<span class="sd">              [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize UnsortedSegmentMax&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;segment_ids&#39;</span><span class="p">,</span> <span class="s1">&#39;num_segments&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__check__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">segment_ids</span><span class="p">,</span> <span class="n">num_segments</span><span class="p">):</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">segment_ids_shape</span> <span class="o">=</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">valid_type</span> <span class="o">=</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">valid_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">({</span><span class="s2">&quot;segment_ids&quot;</span><span class="p">:</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]},</span>
                                                      <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">segment_ids_shape</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="s2">&quot;rank of segment_ids_shape&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">num_segments_type</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="n">num_segments_type</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">number</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">x_shape</span> <span class="ow">and</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">segment_ids_shape</span><span class="p">:</span>
            <span class="c1"># only validate when both shapes fully known</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;first shape of input_x&#39;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
                            <span class="s1">&#39;length of segments_id&#39;</span><span class="p">,</span> <span class="n">segment_ids_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">num_segments_v</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;num_segments&#39;</span><span class="p">,</span> <span class="n">num_segments_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">num_segments_v</span><span class="p">,</span> <span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span></div>


<div class="viewcode-block" id="UnsortedSegmentProd"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.UnsortedSegmentProd.html#mindspore.ops.UnsortedSegmentProd">[docs]</a><span class="k">class</span> <span class="nc">UnsortedSegmentProd</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the product of a tensor along segments.</span>

<span class="sd">    The following figure shows the calculation process of UnsortedSegmentProd:</span>

<span class="sd">    .. image:: api_img/UnsortedSegmentProd.png</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          With float16, float32 or int32 data type.</span>
<span class="sd">        - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.</span>
<span class="sd">          Data type must be int32.</span>
<span class="sd">        - **num_segments** (int) - The value specifies the number of distinct `segment_ids`,</span>
<span class="sd">          must be greater than 0.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `num_segments` is not an int.</span>
<span class="sd">        ValueError: If length of shape of `segment_ids` is not equal to 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; num_segments = 2</span>
<span class="sd">        &gt;&gt;&gt; unsorted_segment_prod = ops.UnsortedSegmentProd()</span>
<span class="sd">        &gt;&gt;&gt; output = unsorted_segment_prod(input_x, segment_ids, num_segments)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[4. 4. 3.]</span>
<span class="sd">         [4. 5. 6.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize UnsortedSegmentProd&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;segment_ids&#39;</span><span class="p">,</span> <span class="s1">&#39;num_segments&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">segment_ids</span><span class="p">,</span> <span class="n">num_segments</span><span class="p">):</span>
        <span class="n">x_type</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">segment_ids_shape</span> <span class="o">=</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x&quot;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;x_shape&quot;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">valid_type</span> <span class="o">=</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">valid_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;segment_ids&quot;</span><span class="p">,</span> <span class="n">segment_ids</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">segment_ids_shape</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="s2">&quot;rank of segment_ids_shape&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;first shape of input_x&#39;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
                        <span class="s1">&#39;length of segments_id&#39;</span><span class="p">,</span> <span class="n">segment_ids_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">num_segments_v</span> <span class="o">=</span> <span class="n">num_segments</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;num_segments&#39;</span><span class="p">,</span> <span class="n">num_segments_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">num_segments_v</span><span class="p">,</span> <span class="s2">&quot;num_segments&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">segment_ids_shape_len</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">segment_ids_shape</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">num_segments_v</span><span class="p">]</span>
        <span class="n">out_shape</span> <span class="o">+=</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">segment_ids_shape_len</span><span class="p">:]</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shape</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">x_type</span><span class="o">.</span><span class="n">element_type</span><span class="p">()),</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<span class="k">class</span> <span class="nc">Concat</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Connects tensors along the specified axis.</span>

<span class="sd">    Connects input tensors along the given axis.</span>

<span class="sd">    The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and</span>
<span class="sd">    :math:`0 \le m &lt; R`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has</span>
<span class="sd">    the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the</span>
<span class="sd">    :math:`i`-th tensor. Then, the shape of the output tensor is</span>

<span class="sd">    .. math::</span>

<span class="sd">        (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)</span>

<span class="sd">    .. warning::</span>
<span class="sd">        The value range of &quot;axis&quot; is [-dims, dims - 1]. &quot;dims&quot; is the dimension length of &quot;input_x&quot;.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): The specified axis. Default: 0.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (tuple, list) - A tuple or a list of input tensors.</span>
<span class="sd">          Suppose there are two tensors in this tuple or list, namely x1 and x2.</span>
<span class="sd">          To perform `Concat` in the axis 0 direction, except for the 0th axis, all other axes should be equal,</span>
<span class="sd">          that is, :math:`x1.shape[1] == x2.shape[1], x1.shape[2] == x2.shape[2], ..., x1.shape[R] == x2.shape[R]`,</span>
<span class="sd">          where the :math:`R` indicates the last axis.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        - Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.</span>
<span class="sd">          The data type is the same with `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; op = ops.Concat()</span>
<span class="sd">        &gt;&gt;&gt; output = op((input_x1, input_x2))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1.]</span>
<span class="sd">         [2. 1.]</span>
<span class="sd">         [0. 1.]</span>
<span class="sd">         [2. 1.]]</span>
<span class="sd">        &gt;&gt;&gt; op = ops.Concat(1)</span>
<span class="sd">        &gt;&gt;&gt; output = op((input_x1, input_x2))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1. 0. 1.]</span>
<span class="sd">         [2. 1. 2. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Concat&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x</span><span class="p">):</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span>
        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">input_x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">x_type</span> <span class="o">=</span> <span class="n">input_x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">_</span><span class="p">,</span> <span class="n">all_shp</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">get_concat_offset</span><span class="p">(</span><span class="n">x_shp</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;inputNums&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">))</span>
        <span class="n">ret_shp</span> <span class="o">=</span> <span class="n">x_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
        <span class="n">value</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">input_x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">concatenate</span><span class="p">([</span><span class="n">x</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">input_x</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]],</span> <span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">))</span>
        <span class="n">ret_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">=</span> <span class="n">all_shp</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">ret_shp</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">in</span> <span class="n">x_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
            <span class="n">x_min_shp</span> <span class="o">=</span> <span class="n">input_x</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span>
            <span class="n">ret_min_shp</span> <span class="o">=</span> <span class="n">x_min_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
            <span class="n">ret_min_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">for</span> <span class="n">all_min_shp</span> <span class="ow">in</span> <span class="n">x_min_shp</span><span class="p">:</span>
                <span class="n">ret_min_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">+=</span> <span class="n">all_min_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">ret_min_shp</span>
            <span class="n">x_max_shp</span> <span class="o">=</span> <span class="n">input_x</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span>
            <span class="n">ret_max_shp</span> <span class="o">=</span> <span class="n">x_max_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
            <span class="n">ret_max_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">for</span> <span class="n">all_max_shp</span> <span class="ow">in</span> <span class="n">x_max_shp</span><span class="p">:</span>
                <span class="n">ret_max_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">+=</span> <span class="n">all_max_shp</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span>
            <span class="n">out</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">ret_max_shp</span>
        <span class="k">return</span> <span class="n">out</span>


<div class="viewcode-block" id="ParallelConcat"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ParallelConcat.html#mindspore.ops.ParallelConcat">[docs]</a><span class="k">class</span> <span class="nc">ParallelConcat</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Concatenates tensors in the first dimension.</span>

<span class="sd">    Concatenates input tensors along the first dimension.</span>

<span class="sd">    The difference between Concat and ParallelConcat is that Concat requires all of the inputs be computed</span>
<span class="sd">    before the operation will begin but doesn&#39;t require that the input shapes be known during graph construction.</span>
<span class="sd">    Parallel concat will copy pieces of the input into the output as they become available, in some situations</span>
<span class="sd">    this can provide a performance benefit.</span>

<span class="sd">    Note:</span>
<span class="sd">        The input tensors are all required to have size 1 in the first dimension.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **values** (tuple, list) - A tuple or a list of input tensors. The data type and shape of these</span>
<span class="sd">          tensors must be the same. The data type is Number except float64.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, data type is the same as `values`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If the length of the shape of `values` is less than 1.</span>

<span class="sd">        ValueError: If the data types and shapes of these tensors are not the same.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; data1 = Tensor(np.array([[0, 1]]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; data2 = Tensor(np.array([[2, 1]]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ParallelConcat()</span>
<span class="sd">        &gt;&gt;&gt; output = op((data1, data2))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0 1]</span>
<span class="sd">         [2 1]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ParallelConcat&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">values</span><span class="p">):</span>
        <span class="n">x_shp</span> <span class="o">=</span> <span class="n">values</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">x_type</span> <span class="o">=</span> <span class="n">values</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>

        <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;x_shp length&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="sa">f</span><span class="s2">&quot;x_type[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&quot;</span><span class="p">:</span> <span class="n">elem</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">elem</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">x_type</span><span class="p">)}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span> <span class="o">+</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="n">first_elem</span> <span class="o">=</span> <span class="n">x_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">elem</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">x_shp</span><span class="p">[</span><span class="mi">1</span><span class="p">:]):</span>
            <span class="n">j</span> <span class="o">=</span> <span class="n">i</span> <span class="o">+</span> <span class="mi">1</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="n">elem</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;x_shp[</span><span class="si">{</span><span class="n">j</span><span class="si">}</span><span class="s1">][0]&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;x_shp[0] shape&quot;</span><span class="p">,</span> <span class="n">first_elem</span><span class="p">,</span> <span class="sa">f</span><span class="s2">&quot;x_shp[</span><span class="si">{</span><span class="n">j</span><span class="si">}</span><span class="s2">] shape&quot;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="n">ret_shp</span> <span class="o">=</span> <span class="n">x_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
        <span class="n">ret_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;shape&#39;</span><span class="p">,</span> <span class="n">ret_shp</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;N&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shp</span><span class="p">))</span>

        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">ret_shp</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<span class="k">def</span> <span class="nf">_get_stack_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;for stack output shape&quot;&quot;&quot;</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="n">prim_name</span><span class="p">)</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="s2">&quot;len of input_x&quot;</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">)</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input_x[0]&quot;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">)</span>
    <span class="n">rank_base</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span>
    <span class="n">n</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
    <span class="n">out_shape</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
    <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">rank_base</span> <span class="o">-</span> <span class="mi">1</span><span class="p">,</span> <span class="n">rank_base</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_BOTH</span><span class="p">,</span> <span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">)</span>
    <span class="k">if</span> <span class="n">axis</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span> <span class="o">+</span> <span class="n">rank_base</span> <span class="o">+</span> <span class="mi">1</span>
    <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">n</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x_type[</span><span class="si">%d</span><span class="s1">]&#39;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">x_type</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="s1">&#39;base&#39;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="n">prim_name</span><span class="p">,</span> <span class="ne">TypeError</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">!=</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="n">prim_name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s2"> element </span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2"> shape in input can not pack with first element&quot;</span><span class="p">)</span>
    <span class="n">out_shape</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="n">n</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">out_shape</span>


<span class="k">class</span> <span class="nc">Pack</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Same as operator Stack. Pack will be deprecated in the future.</span>
<span class="sd">    Please use Stack instead.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@deprecated</span><span class="p">(</span><span class="s2">&quot;1.1&quot;</span><span class="p">,</span> <span class="s2">&quot;Stack&quot;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Pack&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">value</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">x_type</span> <span class="o">=</span> <span class="n">value</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;num&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">))</span>
        <span class="n">all_shape</span> <span class="o">=</span> <span class="n">_get_stack_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">all_shape</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<div class="viewcode-block" id="Stack"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Stack.html#mindspore.ops.Stack">[docs]</a><span class="k">class</span> <span class="nc">Stack</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Stacks a list of tensors in specified axis.</span>

<span class="sd">    Stacks the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`.</span>

<span class="sd">    Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.</span>
<span class="sd">    If :math:`0 \le axis`, the shape of the output tensor is</span>
<span class="sd">    :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): Dimension to stack. Default: 0.</span>
<span class="sd">                    Negative values wrap around. The range is [-(R+1), R+1).</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor. A stacked Tensor with the same type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If the data types of elements in `input_x` are not the same.</span>
<span class="sd">        ValueError: If the length of `input_x` is less than 1;</span>
<span class="sd">                    or if axis is out of the range [-(R+1), R+1);</span>
<span class="sd">                    or if the shapes of elements in input_x are not the same.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; data1 = Tensor(np.array([0, 1]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; data2 = Tensor(np.array([2, 3]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; stack = ops.Stack()</span>
<span class="sd">        &gt;&gt;&gt; output = stack([data1, data2])</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1.]</span>
<span class="sd">         [2. 3.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Stack&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">value</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">x_type</span> <span class="o">=</span> <span class="n">value</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;num&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">))</span>
        <span class="n">all_shape</span> <span class="o">=</span> <span class="n">_get_stack_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">tuple_value</span> <span class="o">=</span> <span class="n">value</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">input_array</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">infered_value</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">tuple_value</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">for</span> <span class="n">item</span> <span class="ow">in</span> <span class="n">tuple_value</span><span class="p">:</span>
                <span class="n">npy_item</span> <span class="o">=</span> <span class="n">item</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span>
                <span class="n">input_array</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">npy_item</span><span class="p">)</span>
            <span class="n">infered_value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="n">input_array</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">))</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">all_shape</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">infered_value</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<span class="k">class</span> <span class="nc">Unpack</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Same as operator Unstack. Unpack will be deprecated in the future.</span>
<span class="sd">    Please use Unstack instead.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@deprecated</span><span class="p">(</span><span class="s2">&quot;1.1&quot;</span><span class="p">,</span> <span class="s2">&quot;Unstack&quot;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span>
    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Unpack&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">dim</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">dim</span><span class="p">,</span> <span class="n">dim</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s1">&#39;axis value&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">+</span> <span class="n">dim</span>
        <span class="n">output_num</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;num&quot;</span><span class="p">,</span> <span class="n">output_num</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">output_num</span><span class="p">,</span> <span class="s2">&quot;output_num&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;num&#39;</span><span class="p">,</span> <span class="n">output_num</span><span class="p">)</span>
        <span class="n">output_valid_check</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">]</span> <span class="o">-</span> <span class="n">output_num</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">output_valid_check</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span>
                            <span class="s2">&quot;The dimension which to unstack divides output_num&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out_shapes</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">out_dtypes</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[:</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">]</span> <span class="o">+</span> <span class="n">x_shape</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">+</span> <span class="mi">1</span><span class="p">:]</span>
        <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">output_num</span><span class="p">):</span>
            <span class="n">out_shapes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="n">out_shape</span><span class="p">))</span>
            <span class="n">out_dtypes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">])</span>
        <span class="n">out_shapes</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">out_shapes</span><span class="p">)</span>
        <span class="n">out_dtypes</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">out_dtypes</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shapes</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">out_dtypes</span><span class="p">,</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<div class="viewcode-block" id="Unstack"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Unstack.html#mindspore.ops.Unstack">[docs]</a><span class="k">class</span> <span class="nc">Unstack</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Unstacks tensor in specified axis.</span>

<span class="sd">    Unstacks a tensor of rank `R` along axis dimension, output tensors will have rank `(R-1)`.</span>

<span class="sd">    Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,</span>
<span class="sd">    the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.</span>

<span class="sd">    This is the opposite of Stack.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): Dimension along which to unstack. Default: 0.</span>
<span class="sd">                    Negative values wrap around. The range is [-R, R).</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          A tensor to be unstacked and the rank of the tensor must be greater than 0.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        A tuple of tensors, the shape of each object is the same.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; unstack = ops.Unstack()</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))</span>
<span class="sd">        &gt;&gt;&gt; output = unstack(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[4], dtype=Int64, value= [1, 1, 1, 1]), Tensor(shape=[4], dtype=Int64, value= [2, 2, 2, 2]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Unstack&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">])</span>
        <span class="n">dim</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">axis</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">axis</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">axis</span><span class="p">,</span> <span class="o">-</span><span class="n">dim</span><span class="p">,</span> <span class="n">dim</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="s1">&#39;axis value&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">axis</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
            <span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span> <span class="o">+</span> <span class="n">dim</span>
        <span class="n">output_num</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;num&quot;</span><span class="p">,</span> <span class="n">output_num</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">output_num</span><span class="p">,</span> <span class="s2">&quot;output_num&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;num&#39;</span><span class="p">,</span> <span class="n">output_num</span><span class="p">)</span>
        <span class="n">output_valid_check</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">-</span> <span class="n">output_num</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">output_valid_check</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span>
                            <span class="s2">&quot;The dimension which to unstack divides output_num&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out_shapes</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">out_dtypes</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[:</span><span class="n">axis</span><span class="p">]</span> <span class="o">+</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">axis</span> <span class="o">+</span> <span class="mi">1</span><span class="p">:]</span>
        <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">output_num</span><span class="p">):</span>
            <span class="n">out_shapes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="n">out_shape</span><span class="p">))</span>
            <span class="n">out_dtypes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">])</span>
        <span class="n">out_shapes</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">out_shapes</span><span class="p">)</span>
        <span class="n">out_dtypes</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">out_dtypes</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shapes</span><span class="p">,</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">out_dtypes</span><span class="p">,</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span></div>


<div class="viewcode-block" id="Slice"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Slice.html#mindspore.ops.Slice">[docs]</a><span class="k">class</span> <span class="nc">Slice</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Slices a tensor in the specified shape.</span>

<span class="sd">    Slice the tensor `input_x` in shape of `size` and starting at the location specified by `begin`.</span>
<span class="sd">    The slice `begin` represents the offset in each dimension of `input_x`.</span>
<span class="sd">    The slice `size` represents the size of the output tensor.</span>

<span class="sd">    Note that `begin` is zero-based and `size` is one-based.</span>

<span class="sd">    If `size[i]` is -1, all remaining elements in dimension i are included in the slice.</span>
<span class="sd">    This is equivalent to setting :math:`size[i] = input_x.shape(i) - begin[i]`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor): The target tensor.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **begin** (Union[tuple, list]): The beginning of the slice. Only constant value(&gt;=0) is allowed.</span>
<span class="sd">        - **size** (Union[tuple, list]): The size of the slice. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape is specified by the input `size`, the data type is the same as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `begin` or `size` is neither tuple nor list.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],</span>
<span class="sd">        ...                         [[3, 3, 3], [4, 4, 4]],</span>
<span class="sd">        ...                         [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; slice_op = ops.Slice()</span>
<span class="sd">        &gt;&gt;&gt; output = slice_op(data, (1, 0, 0), (1, 1, 3))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[3 3 3]]]</span>
<span class="sd">        &gt;&gt;&gt; output = slice_op(data, (1, 0, 0), (1, 1, 2))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[3 3]]]</span>
<span class="sd">        &gt;&gt;&gt; output = slice_op(data, (1, 0, 0), (1, 1, 1))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[3]]]</span>
<span class="sd">        &gt;&gt;&gt; output = slice_op(data, (1, 1, 0), (1, 1, 3))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[4 4 4]]]</span>
<span class="sd">        &gt;&gt;&gt; output = slice_op(data, (1, 0, 1), (1, 1, 2))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[3 3]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize slice&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;begin&#39;</span><span class="p">,</span> <span class="s1">&#39;size&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">begin</span><span class="p">,</span> <span class="n">size</span><span class="p">):</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">x_shp_len</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">begin_v</span><span class="p">,</span> <span class="n">size_v</span> <span class="o">=</span> <span class="n">begin</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="n">size</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="n">begin_v</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">or</span> <span class="n">size_v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="c1"># if size_v is not None and begin_v is None, it should be also a dynamic output shape.</span>
            <span class="k">if</span> <span class="n">size_v</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">size</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
                    <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the size shape haven&#39;t support dynamic yet.&quot;</span><span class="p">)</span>
                <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="n">size</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="nb">len</span><span class="p">(</span><span class="n">size_v</span><span class="p">)</span>
            <span class="k">if</span> <span class="s1">&#39;max_shape&#39;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
                <span class="n">max_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;max_shape&#39;</span><span class="p">]</span>
                <span class="n">min_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;min_shape&#39;</span><span class="p">]</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">min_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
                <span class="n">max_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
            <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">out_shape</span><span class="p">,</span>
                    <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                    <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
                    <span class="s1">&#39;min_shape&#39;</span><span class="p">:</span> <span class="n">min_shape</span><span class="p">,</span>
                    <span class="s1">&#39;max_shape&#39;</span><span class="p">:</span> <span class="n">max_shape</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_valid_input</span><span class="p">(</span><span class="s1">&#39;begin&#39;</span><span class="p">,</span> <span class="n">begin</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_valid_input</span><span class="p">(</span><span class="s1">&#39;size&#39;</span><span class="p">,</span> <span class="n">size</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;input begin&quot;</span><span class="p">,</span> <span class="n">begin_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;input size&quot;</span><span class="p">,</span> <span class="n">size_v</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">((</span><span class="s1">&#39;begin&#39;</span><span class="p">,</span> <span class="s1">&#39;size&#39;</span><span class="p">),</span> <span class="p">(</span><span class="n">begin_v</span><span class="p">,</span> <span class="n">size_v</span><span class="p">)):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;len of </span><span class="si">{</span><span class="n">key</span><span class="si">}</span><span class="s1">&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">value</span><span class="p">),</span>
                            <span class="s1">&#39;len x</span><span class="se">\&#39;</span><span class="s1">s dim&#39;</span><span class="p">,</span> <span class="n">x_shp_len</span><span class="p">)</span>
        <span class="n">size_v</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">size_v</span><span class="p">)</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">x_shape</span><span class="p">:</span>
            <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">x_shp_len</span><span class="p">):</span>
                <span class="k">if</span> <span class="n">size_v</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">==</span> <span class="o">-</span><span class="mi">1</span><span class="p">:</span>
                    <span class="n">size_v</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">-</span> <span class="n">begin_v</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">size_v</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="sa">f</span><span class="s1">&#39;input size[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">]&#39;</span><span class="p">)</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">begin_v</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="sa">f</span><span class="s1">&#39;input begin[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">]&#39;</span><span class="p">)</span>
                <span class="k">if</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">&lt;</span> <span class="n">begin_v</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">+</span> <span class="n">size_v</span><span class="p">[</span><span class="n">i</span><span class="p">]:</span>
                    <span class="n">y</span> <span class="o">=</span> <span class="n">begin_v</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">+</span> <span class="n">size_v</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                    <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the sliced shape can not be greater than origin shape, &quot;</span>
                                     <span class="sa">f</span><span class="s2">&quot;but got sliced shape is </span><span class="si">{</span><span class="n">y</span><span class="si">}</span><span class="s2">, and origin shape is </span><span class="si">{</span><span class="n">x_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">size_v</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span></div>


<div class="viewcode-block" id="ReverseV2"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ReverseV2.html#mindspore.ops.ReverseV2">[docs]</a><span class="k">class</span> <span class="nc">ReverseV2</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Reverses specific dimensions of a tensor.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        The value range of &quot;axis&quot; is [-dims, dims - 1]. &quot;dims&quot; is the dimension length of &quot;input_x&quot;.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor. The data type is Number except float64.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is neither list nor tuple.</span>
<span class="sd">        TypeError: If element of `axis` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ReverseV2(axis=[1])</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[4 3 2 1]</span>
<span class="sd">         [8 7 6 5]]</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ReverseV2(axis=[1, 0])</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[8 7 6 5]</span>
<span class="sd">         [4 3 2 1]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ReverseV2.&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;axis&#39;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">each</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">axis</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;axis[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">]&#39;</span><span class="p">,</span> <span class="n">each</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">dim</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">each</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_int_range</span><span class="p">(</span><span class="n">each</span><span class="p">,</span> <span class="o">-</span><span class="n">dim</span><span class="p">,</span> <span class="n">dim</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">INC_LEFT</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;axis[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">]&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">normalized_axis</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">axis</span><span class="p">):</span>
            <span class="k">if</span> <span class="n">v</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
                <span class="n">normalized_axis</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">v</span> <span class="o">+</span> <span class="n">dim</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">normalized_axis</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">v</span><span class="p">)</span>

        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">normalized_axis</span><span class="p">)</span> <span class="o">!=</span> <span class="nb">len</span><span class="p">(</span><span class="nb">set</span><span class="p">(</span><span class="n">normalized_axis</span><span class="p">)):</span>
            <span class="n">duplicated</span> <span class="o">=</span> <span class="p">[</span><span class="n">item</span> <span class="k">for</span> <span class="n">item</span><span class="p">,</span> <span class="n">count</span> <span class="ow">in</span> <span class="n">Counter</span><span class="p">(</span><span class="n">normalized_axis</span><span class="p">)</span><span class="o">.</span><span class="n">items</span><span class="p">()</span> <span class="k">if</span> <span class="n">count</span> <span class="o">&gt;</span> <span class="mi">1</span><span class="p">]</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the &#39;axis&#39; cannot contain duplicate dimensions,&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; but got duplicated elements </span><span class="si">{</span><span class="n">duplicated</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="n">x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,)</span> <span class="o">+</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span></div>


<div class="viewcode-block" id="Rint"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Rint.html#mindspore.ops.Rint">[docs]</a><span class="k">class</span> <span class="nc">Rint</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns an integer that is closest to x element-wise.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor, which must be one of the following types:</span>
<span class="sd">          float16, float32, float64. The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `input_x` is not in [float16, float32, float64].</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.Rint()</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [-2.  0.  2.  2.]</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-2.0, -1.9, -1.8, -1.7, -1.6],</span>
<span class="sd">        ...                            [-2.0, -1.9, -1.8, -1.7, -1.6]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[-2. -2. -2. -2. -2.]</span>
<span class="sd">         [-2. -2. -2. -2. -2.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Rint.&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span></div>



<div class="viewcode-block" id="Select"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Select.html#mindspore.ops.Select">[docs]</a><span class="k">class</span> <span class="nc">Select</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>

<span class="sd">    Returns the selected elements, either from input :math:`x` or input :math:`y`, depending on the `condition`.</span>

<span class="sd">    It is invalid when both :math:`x` and :math:`y` are None.</span>
<span class="sd">    Keep in mind that the output tensor has the same shape as the `condition`,</span>
<span class="sd">    :math:`x` and :math:`y` input tensors.</span>

<span class="sd">    The conditional tensor acts as an optional compensation (mask), which</span>
<span class="sd">    determines whether the corresponding element / row in the output must be</span>
<span class="sd">    selected from :math:`x` (if true) or :math:`y` (if false) based on the value of each</span>
<span class="sd">    element.</span>

<span class="sd">    It can be defined as:</span>

<span class="sd">    .. math::</span>
<span class="sd">        out_i = \begin{cases}</span>
<span class="sd">        x_i, &amp; \text{if } condition_i \\</span>
<span class="sd">        y_i, &amp; \text{otherwise}</span>
<span class="sd">        \end{cases}</span>

<span class="sd">    If condition is a vector, then :math:`x` and :math:`y` are higher-dimensional matrices, then it</span>
<span class="sd">    chooses to copy that row (external dimensions) from :math:`x` and :math:`y`. If condition has</span>
<span class="sd">    the same shape as :math:`x` and :math:`y`, you can choose to copy these elements from :math:`x`</span>
<span class="sd">    and :math:`y`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_cond** (Tensor[bool]) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.</span>
<span class="sd">          The condition tensor, decides which element is chosen.</span>
<span class="sd">        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.</span>
<span class="sd">          The first input tensor.</span>
<span class="sd">        - **input_y** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.</span>
<span class="sd">          The second input tensor.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape as `input_x`. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` or `input_y` is not a Tensor.</span>
<span class="sd">        ValueError: If shape of `input_x` is not equal to shape of `input_y` or shape of `input_cond`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; select = ops.Select()</span>
<span class="sd">        &gt;&gt;&gt; input_cond = Tensor([True, False])</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor([2,3], mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_y = Tensor([1,2], mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = select(input_cond, input_x, input_y)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [2. 2.]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Select.&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;condition&#39;</span><span class="p">,</span> <span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;y&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span></div>


<span class="k">def</span> <span class="nf">_compute_slicing_length</span><span class="p">(</span><span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">i</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;Computes the length of the slicing.&quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">i</span> <span class="o">&gt;=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;StridedSlice&#39;, the index must be less than &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the dimension of &#39;input_x&#39;: </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2"> &quot;</span>
                         <span class="sa">f</span><span class="s2">&quot;and the index: </span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
    <span class="n">x_dim</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
    <span class="k">if</span> <span class="n">stride</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
        <span class="c1"># When slicing forward, convert begin and end to positive numbers.</span>
        <span class="k">if</span> <span class="n">begin</span> <span class="o">&gt;=</span> <span class="n">x_dim</span> <span class="ow">or</span> <span class="n">end</span> <span class="o">&lt;</span> <span class="o">-</span><span class="n">x_dim</span><span class="p">:</span>
            <span class="c1"># When slicing forward, if begin &gt;= x_dim or end &lt; -x_dim, the length of the slicing is 0.</span>
            <span class="n">slicing_length</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="o">-</span><span class="n">x_dim</span> <span class="o">&lt;=</span> <span class="n">begin</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
                <span class="n">begin</span> <span class="o">+=</span> <span class="n">x_dim</span>
            <span class="k">if</span> <span class="n">begin</span> <span class="o">&lt;</span> <span class="o">-</span><span class="n">x_dim</span><span class="p">:</span>
                <span class="c1"># When slicing forward, if begin &lt; -x_dim, set begin = 0, which means start from the 0th element.</span>
                <span class="n">begin</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">if</span> <span class="o">-</span><span class="n">x_dim</span> <span class="o">&lt;=</span> <span class="n">end</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
                <span class="n">end</span> <span class="o">+=</span> <span class="n">x_dim</span>
            <span class="k">if</span> <span class="n">end</span> <span class="o">&gt;</span> <span class="n">x_dim</span><span class="p">:</span>
                <span class="c1"># When slicing forward, if end &gt; x_dim, set end = x_dims, which means slice to the last element.</span>
                <span class="n">end</span> <span class="o">=</span> <span class="n">x_dim</span>
            <span class="k">if</span> <span class="n">begin</span> <span class="o">&gt;=</span> <span class="n">end</span><span class="p">:</span>
                <span class="c1"># When slicing forward, if begin &gt;= end, the length of the slicing is 0.</span>
                <span class="n">slicing_length</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">slicing_length</span> <span class="o">=</span> <span class="mi">1</span> <span class="o">+</span> <span class="p">(</span><span class="n">end</span> <span class="o">-</span> <span class="mi">1</span> <span class="o">-</span> <span class="n">begin</span><span class="p">)</span> <span class="o">//</span> <span class="n">stride</span>
    <span class="k">else</span><span class="p">:</span>
        <span class="c1"># When slicing backward, convert begin and end to negative numbers.</span>
        <span class="k">if</span> <span class="n">begin</span> <span class="o">&lt;</span> <span class="o">-</span><span class="n">x_dim</span> <span class="ow">or</span> <span class="n">end</span> <span class="o">&gt;=</span> <span class="n">x_dim</span><span class="p">:</span>
            <span class="c1"># When slicing backward, if begin &lt; -x_dim or end &gt;= x_dim, the length of the slicing is 0.</span>
            <span class="n">slicing_length</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="mi">0</span> <span class="o">&lt;=</span> <span class="n">begin</span> <span class="o">&lt;</span> <span class="n">x_dim</span><span class="p">:</span>
                <span class="n">begin</span> <span class="o">+=</span> <span class="o">-</span><span class="n">x_dim</span>
            <span class="k">if</span> <span class="n">begin</span> <span class="o">&gt;=</span> <span class="n">x_dim</span><span class="p">:</span>
                <span class="n">begin</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
            <span class="k">if</span> <span class="mi">0</span> <span class="o">&lt;=</span> <span class="n">end</span> <span class="o">&lt;</span> <span class="n">x_dim</span><span class="p">:</span>
                <span class="n">end</span> <span class="o">+=</span> <span class="o">-</span><span class="n">x_dim</span>
            <span class="k">if</span> <span class="n">end</span> <span class="o">&lt;</span> <span class="o">-</span><span class="n">x_dim</span> <span class="o">-</span> <span class="mi">1</span><span class="p">:</span>
                <span class="c1"># Slicing to the 0th element.</span>
                <span class="n">end</span> <span class="o">=</span> <span class="o">-</span><span class="n">x_dim</span> <span class="o">-</span> <span class="mi">1</span>
            <span class="k">if</span> <span class="n">begin</span> <span class="o">&lt;=</span> <span class="n">end</span><span class="p">:</span>
                <span class="n">slicing_length</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">slicing_length</span> <span class="o">=</span> <span class="mi">1</span> <span class="o">+</span> <span class="p">(</span><span class="n">end</span> <span class="o">+</span> <span class="mi">1</span> <span class="o">-</span> <span class="n">begin</span><span class="p">)</span> <span class="o">//</span> <span class="n">stride</span>
    <span class="k">return</span> <span class="n">slicing_length</span>


<div class="viewcode-block" id="StridedSlice"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.StridedSlice.html#mindspore.ops.StridedSlice">[docs]</a><span class="k">class</span> <span class="nc">StridedSlice</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>

<span class="sd">    Extracts a strided slice of a tensor.</span>

<span class="sd">    This operation extracts a fragment of size (end-begin)/stride from the given &#39;input_tensor&#39;.</span>
<span class="sd">    Starting from the beginning position, the fragment continues adding stride to the index until</span>
<span class="sd">    all dimensions are not less than the ending position.</span>

<span class="sd">    Given a `input_x[m1, m2, ..., mn]`, `begin`, `end` and `strides` will be vectors of length n.</span>

<span class="sd">    In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`, `shrink_axis_mask`)</span>
<span class="sd">    the ith bit will correspond to the ith m.</span>

<span class="sd">    If the ith bit of `begin_mask` is set, `begin[i]` is ignored and the fullest possible range in that dimension</span>
<span class="sd">    is used instead. `end_mask` is analogous, except with the end range.</span>

<span class="sd">    As for a 5*6*7 tensor, `x[2:,:3,:]` is equivalent to `x[2:5,0:3,0:7]`.</span>

<span class="sd">    If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions as needed will be inserted between</span>
<span class="sd">    other dimensions. Only one non-zero bit is allowed in `ellipsis_mask`.</span>

<span class="sd">    As for a 5*6*7*8 tensor, `x[2:,...,:6]` is equivalent to `x[2:5,:,:,0:6]`.</span>
<span class="sd">    `x[2:,...]` is equivalent to `x[2:5,:,:,:]`.</span>

<span class="sd">    If the ith bit of `new_axis_mask` is set, `begin`, `end` and `strides` are ignored and a new length 1</span>
<span class="sd">    dimension is added at the specified position in the output tensor.</span>

<span class="sd">    As for a 5*6*7 tensor, `x[:2, newaxis, :6]` will produce a tensor with shape (2, 1, 6, 7).</span>

<span class="sd">    If the ith bit of `shrink_axis_mask` is set, ith size shrinks the dimension by 1, taking on the value</span>
<span class="sd">    at index `begin[i]`, `end[i]` and `strides[i]` are ignored.</span>

<span class="sd">    As for a 5*6*7 tensor, `x[:, 5, :]` will result in `shrink_axis_mask` equal to 2.</span>

<span class="sd">    Note:</span>
<span class="sd">        The stride may be negative value, which causes reverse slicing.</span>
<span class="sd">        The shape of `begin`, `end` and `strides` must be the same.</span>
<span class="sd">        `begin` and `end` are zero-indexed. The element of `strides` must be non-zero.</span>

<span class="sd">    Args:</span>
<span class="sd">        begin_mask (int): Starting index of the slice. Default: 0.</span>
<span class="sd">        end_mask (int): Ending index of the slice. Default: 0.</span>
<span class="sd">        ellipsis_mask (int): An int mask. Default: 0.</span>
<span class="sd">        new_axis_mask (int): An int mask. Default: 0.</span>
<span class="sd">        shrink_axis_mask (int): An int mask. Default: 0.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input Tensor.</span>
<span class="sd">        - **begin** (tuple[int]) - A tuple which represents the location where to start. Only</span>
<span class="sd">          constant value is allowed.</span>
<span class="sd">        - **end** (tuple[int]) - A tuple which represents the maximum location where to end.</span>
<span class="sd">          Only constant value is allowed.</span>
<span class="sd">        - **strides** (tuple[int]) - A tuple which represents the stride is continuously added</span>
<span class="sd">          before reaching the maximum location. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, The output is explained by following example.</span>

<span class="sd">        In the 0th dimension, begin is 1, end is 2, and strides is 1,</span>
<span class="sd">        because :math:`1+1=2\geq2`, the interval is :math:`[1,2)`.</span>
<span class="sd">        Thus, return the element with :math:`index = 1` in 0th dimension, i.e., [[3, 3, 3], [4, 4, 4]].</span>

<span class="sd">        In the 1st dimension, similarly, the interval is :math:`[0,1)`.</span>
<span class="sd">        Based on the return value of the 0th dimension, return the element with :math:`index = 0`,</span>
<span class="sd">        i.e., [3, 3, 3].</span>

<span class="sd">        In the 2nd dimension, similarly, the interval is :math:`[0,3)`.</span>
<span class="sd">        Based on the return value of the 1st dimension, return the element with :math:`index = 0,1,2`,</span>
<span class="sd">        i.e., [3, 3, 3].</span>

<span class="sd">        Finally, the output is [3, 3, 3].</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is not an int.</span>
<span class="sd">        TypeError: If `begin`, `end` or `strides` is not a tuple.</span>
<span class="sd">        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is less than 0.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],</span>
<span class="sd">        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; #         [[[1. 1. 1.]</span>
<span class="sd">        &gt;&gt;&gt; #           [2. 2. 2.]]</span>
<span class="sd">        &gt;&gt;&gt; #</span>
<span class="sd">        &gt;&gt;&gt; #          [[3. 3. 3.]</span>
<span class="sd">        &gt;&gt;&gt; #           [4. 4. 4.]]</span>
<span class="sd">        &gt;&gt;&gt; #</span>
<span class="sd">        &gt;&gt;&gt; #          [[5. 5. 5.]</span>
<span class="sd">        &gt;&gt;&gt; #           [6. 6. 6.]]]</span>
<span class="sd">        &gt;&gt;&gt; # In order to visually view the multi-dimensional array, write the above as follows:</span>
<span class="sd">        &gt;&gt;&gt; #         [</span>
<span class="sd">        &gt;&gt;&gt; #             [</span>
<span class="sd">        &gt;&gt;&gt; #                 [1,1,1]</span>
<span class="sd">        &gt;&gt;&gt; #                 [2,2,2]</span>
<span class="sd">        &gt;&gt;&gt; #             ]</span>
<span class="sd">        &gt;&gt;&gt; #             [</span>
<span class="sd">        &gt;&gt;&gt; #                 [3,3,3]</span>
<span class="sd">        &gt;&gt;&gt; #                 [4,4,4]</span>
<span class="sd">        &gt;&gt;&gt; #             ]</span>
<span class="sd">        &gt;&gt;&gt; #             [</span>
<span class="sd">        &gt;&gt;&gt; #                 [5,5,5]</span>
<span class="sd">        &gt;&gt;&gt; #                 [6,6,6]</span>
<span class="sd">        &gt;&gt;&gt; #             ]</span>
<span class="sd">        &gt;&gt;&gt; #         ]</span>
<span class="sd">        &gt;&gt;&gt; strided_slice = ops.StridedSlice()</span>
<span class="sd">        &gt;&gt;&gt; output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))</span>
<span class="sd">        &gt;&gt;&gt; # Take this &quot; output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) &quot; as an example,</span>
<span class="sd">        &gt;&gt;&gt; # start = [1, 0, 2] , end = [3, 1, 3], stride = [1, 1, 1], Find a segment of (start, end),</span>
<span class="sd">        &gt;&gt;&gt; # note that end is an open interval</span>
<span class="sd">        &gt;&gt;&gt; # To facilitate understanding, this operator can be divided into three steps:</span>
<span class="sd">        &gt;&gt;&gt; # Step 1: Calculation of the first dimension:</span>
<span class="sd">        &gt;&gt;&gt; # start = 1, end = 3, stride = 1, So can take 1st, 2nd rows, and then gets the final output at this time.</span>
<span class="sd">        &gt;&gt;&gt; # output_1st =</span>
<span class="sd">        &gt;&gt;&gt; # [</span>
<span class="sd">        &gt;&gt;&gt; #     [</span>
<span class="sd">        &gt;&gt;&gt; #         [3,3,3]</span>
<span class="sd">        &gt;&gt;&gt; #         [4,4,4]</span>
<span class="sd">        &gt;&gt;&gt; #     ]</span>
<span class="sd">        &gt;&gt;&gt; #     [</span>
<span class="sd">        &gt;&gt;&gt; #         [5,5,5]</span>
<span class="sd">        &gt;&gt;&gt; #         [6,6,6]</span>
<span class="sd">        &gt;&gt;&gt; #     ]</span>
<span class="sd">        &gt;&gt;&gt; # ]</span>
<span class="sd">        &gt;&gt;&gt; # Step 2: Calculation of the second dimension</span>
<span class="sd">        &gt;&gt;&gt; # 2nd dimension, start = 0, end = 1, stride = 1. So only 0th rows can be taken, and the output at this time.</span>
<span class="sd">        &gt;&gt;&gt; # output_2nd =</span>
<span class="sd">        &gt;&gt;&gt; # [</span>
<span class="sd">        &gt;&gt;&gt; #     [</span>
<span class="sd">        &gt;&gt;&gt; #         [3,3,3]</span>
<span class="sd">        &gt;&gt;&gt; #     ]</span>
<span class="sd">        &gt;&gt;&gt; #     [</span>
<span class="sd">        &gt;&gt;&gt; #         [5,5,5]</span>
<span class="sd">        &gt;&gt;&gt; #     ]</span>
<span class="sd">        &gt;&gt;&gt; # ]</span>
<span class="sd">        &gt;&gt;&gt; # Step 3: Calculation of the third dimension</span>
<span class="sd">        &gt;&gt;&gt; # 3rd dimension, start = 2, end = 3, stride = 1, So can take 2nd cols,</span>
<span class="sd">        &gt;&gt;&gt; # and you get the final output at this time.</span>
<span class="sd">        &gt;&gt;&gt; # output_3rd =</span>
<span class="sd">        &gt;&gt;&gt; # [</span>
<span class="sd">        &gt;&gt;&gt; #     [</span>
<span class="sd">        &gt;&gt;&gt; #         [3]</span>
<span class="sd">        &gt;&gt;&gt; #     ]</span>
<span class="sd">        &gt;&gt;&gt; #     [</span>
<span class="sd">        &gt;&gt;&gt; #         [5]</span>
<span class="sd">        &gt;&gt;&gt; #     ]</span>
<span class="sd">        &gt;&gt;&gt; # ]</span>
<span class="sd">        &gt;&gt;&gt; # The final output after finishing is:</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[3.]]</span>
<span class="sd">         [[5.]]]</span>
<span class="sd">        &gt;&gt;&gt; # another example like :</span>
<span class="sd">        &gt;&gt;&gt; output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[3. 3. 3.]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">begin_mask</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">end_mask</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">ellipsis_mask</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">new_axis_mask</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">shrink_axis_mask</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize StridedSlice&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;begin&#39;</span><span class="p">,</span> <span class="s1">&#39;end&#39;</span><span class="p">,</span> <span class="s1">&#39;strides&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>
        <span class="c1"># auto parallel haven&#39;t support begin_mask and end_mask</span>
        <span class="k">if</span> <span class="n">context</span><span class="o">.</span><span class="n">get_auto_parallel_context</span><span class="p">(</span><span class="s2">&quot;parallel_mode&quot;</span><span class="p">)</span> <span class="ow">in</span> <span class="p">[</span><span class="s2">&quot;semi_auto_parallel&quot;</span><span class="p">,</span> <span class="s2">&quot;auto_parallel&quot;</span><span class="p">]:</span>
            <span class="n">begin_mask</span> <span class="o">=</span> <span class="mi">0</span>
            <span class="n">end_mask</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">begin_mask</span><span class="p">,</span> <span class="s1">&#39;begin_mask&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">end_mask</span><span class="p">,</span> <span class="s1">&#39;end_mask&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">ellipsis_mask</span><span class="p">,</span> <span class="s1">&#39;ellipsis_mask&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">,</span> <span class="nb">bin</span><span class="p">(</span><span class="n">ellipsis_mask</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">])))</span> <span class="o">&gt;</span> <span class="mi">1</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, only support one ellipsis in the index, but got </span><span class="si">{</span><span class="n">end_mask</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">new_axis_mask</span><span class="p">,</span> <span class="s1">&#39;new_axis_mask&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">shrink_axis_mask</span><span class="p">,</span> <span class="s1">&#39;shrink_axis_mask&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <!-- Rendered method _check_and_get_value(self, slice_input, name):
         validates one of the begin/end/strides inputs (an abstract-value dict
         with 'value'/'dtype'/'shape' keys) and returns (value, length).
         Three paths, as shown below:
         1. value is None (dynamic input): dtype must be int64, shape must be
            1-D; returns (None, shape[0]).
         2. value is a Tensor_: dtype must be int64; converted via
            asnumpy().tolist().
         3. otherwise value must be a tuple, else TypeError.
         Finally, every element must be an int; returns (value, len(value)). -->
    <span class="k">def</span> <span class="nf">_check_and_get_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">slice_input</span><span class="p">,</span> <span class="n">name</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Check begin, end, strides. Get its length and value.&quot;&quot;&quot;</span>
        <span class="n">slice_value</span> <span class="o">=</span> <span class="n">slice_input</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="n">slice_value</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">slice_input</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">slice_shape</span> <span class="o">=</span> <span class="n">slice_input</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">slice_shape</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">1</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, both the &#39;begins&#39;, &#39;ends&#39;, and &#39;strides&#39; must be 1-D, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but got &#39;</span><span class="si">{</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39; shape: </span><span class="si">{</span><span class="n">slice_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
            <span class="c1"># not support scalar</span>
            <span class="k">return</span> <span class="n">slice_value</span><span class="p">,</span> <span class="n">slice_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>

        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">slice_value</span><span class="p">,</span> <span class="n">Tensor_</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">slice_input</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">slice_value</span> <span class="o">=</span> <span class="n">slice_value</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span><span class="o">.</span><span class="n">tolist</span><span class="p">()</span>
        <span class="k">elif</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">slice_value</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, both the &#39;begin&#39;, &#39;end&#39;, and &#39;strides&#39; must be a tuple or Tensor, &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got &#39;</span><span class="si">{</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;: </span><span class="si">{</span><span class="n">slice_value</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="nb">tuple</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">int</span><span class="p">),</span> <span class="n">slice_value</span><span class="p">)):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the elements of &#39;begin&#39;, &#39;end&#39;, and &#39;strides&#39; must be int, &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">name</span><span class="si">}</span><span class="s2">: </span><span class="si">{</span><span class="n">slice_value</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">slice_value</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">slice_value</span><span class="p">)</span>

    <!-- Rendered method __infer__(self, x, begin, end, strides): shape/value
         inference for the slicing operator. Flow as displayed below:
         - rejects dynamic input shape (-1 in x['shape']);
         - validates begin/end/strides via _check_and_get_value;
         - rejects any 0 stride and mismatched begin/end/strides lengths;
         - if any of the three values is None (dynamic), delegates to
           _compute_dynamic_slicing_shape and reports min/max shapes
           (min 1 per dim, max = input dim, pinned where ret_shape is known);
         - otherwise computes the static shape via _compute_slicing_shape;
           a shape containing 0 yields a Zero-initialized Tensor as 'value';
         - when x carries 'max_value'/'min_value' tuples, they are sliced
           with the same (begin, end, stride) indices via numpy and
           propagated in the returned abstract dict. -->
    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">strides</span><span class="p">):</span>
        <span class="n">x_shape</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">in</span> <span class="n">x_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, input x is currently not support dynamic shape.&quot;</span><span class="p">)</span>
        <span class="n">begin_v</span><span class="p">,</span> <span class="n">begin_len</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_check_and_get_value</span><span class="p">(</span><span class="n">begin</span><span class="p">,</span> <span class="s1">&#39;begin&#39;</span><span class="p">)</span>
        <span class="n">end_v</span><span class="p">,</span> <span class="n">end_len</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_check_and_get_value</span><span class="p">(</span><span class="n">end</span><span class="p">,</span> <span class="s1">&#39;end&#39;</span><span class="p">)</span>
        <span class="n">strides_v</span><span class="p">,</span> <span class="n">strides_len</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_check_and_get_value</span><span class="p">(</span><span class="n">strides</span><span class="p">,</span> <span class="s1">&#39;strides&#39;</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">strides_v</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="nb">tuple</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span> <span class="o">==</span> <span class="mi">0</span><span class="p">,</span> <span class="n">strides_v</span><span class="p">)):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the &#39;strides&#39; cannot contain 0, but got &#39;strides&#39;: </span><span class="si">{</span><span class="n">strides_v</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">begin_len</span> <span class="o">!=</span> <span class="n">strides_len</span> <span class="ow">or</span> <span class="n">end_len</span> <span class="o">!=</span> <span class="n">strides_len</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, &#39;begin&#39;, &#39;end&#39; and &#39;strides&#39; must be the same length, but got &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;&#39;begin&#39; length: </span><span class="si">{</span><span class="n">begin_len</span><span class="si">}</span><span class="s2">, &#39;end&#39; length: </span><span class="si">{</span><span class="n">end_len</span><span class="si">}</span><span class="s2">, &#39;strides&#39; length: </span><span class="si">{</span><span class="n">strides_len</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="kc">None</span> <span class="ow">in</span> <span class="p">(</span><span class="n">strides_v</span><span class="p">,</span> <span class="n">begin_v</span><span class="p">,</span> <span class="n">end_v</span><span class="p">):</span>
            <span class="n">ret_shape</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_compute_dynamic_slicing_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">begin_len</span><span class="p">)</span>
            <span class="n">ret_min_shape</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
            <span class="n">ret_max_shape</span> <span class="o">=</span> <span class="n">x_shape</span>
            <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">val</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">ret_shape</span><span class="p">):</span>
                <span class="k">if</span> <span class="n">val</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
                    <span class="n">ret_min_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">val</span>
                    <span class="n">ret_max_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">val</span>
            <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">ret_shape</span><span class="p">,</span>
                    <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                    <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
                    <span class="s1">&#39;max_shape&#39;</span><span class="p">:</span> <span class="n">ret_max_shape</span><span class="p">,</span>
                    <span class="s1">&#39;min_shape&#39;</span><span class="p">:</span> <span class="n">ret_min_shape</span><span class="p">}</span>

        <span class="n">ret_shape</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_compute_slicing_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">begin_v</span><span class="p">,</span> <span class="n">end_v</span><span class="p">,</span> <span class="n">strides_v</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">all</span><span class="p">(</span><span class="n">ret_shape</span><span class="p">):</span>
            <span class="n">value</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">init_func</span> <span class="o">=</span> <span class="n">Zero</span><span class="p">()</span>
            <span class="n">init_func</span><span class="o">.</span><span class="n">__enable_zero_dim__</span> <span class="o">=</span> <span class="kc">True</span>
            <span class="n">value</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">dtype</span><span class="o">=</span><span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">element_type</span><span class="p">(),</span> <span class="n">shape</span><span class="o">=</span><span class="n">ret_shape</span><span class="p">,</span> <span class="n">init</span><span class="o">=</span><span class="n">init_func</span><span class="p">)</span>

        <span class="k">if</span> <span class="s2">&quot;max_value&quot;</span> <span class="ow">in</span> <span class="n">x</span> <span class="ow">and</span> <span class="s2">&quot;min_value&quot;</span> <span class="ow">in</span> <span class="n">x</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;min_value&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s2">&quot;min_value&quot;</span><span class="p">],</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;max_value&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s2">&quot;max_value&quot;</span><span class="p">],</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">max_value_np</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s2">&quot;max_value&quot;</span><span class="p">])</span>
            <span class="n">min_value_np</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">x</span><span class="p">[</span><span class="s2">&quot;min_value&quot;</span><span class="p">])</span>
            <span class="n">slice_index</span> <span class="o">=</span> <span class="p">[]</span>
            <span class="k">for</span> <span class="n">begin_i</span><span class="p">,</span> <span class="n">end_i</span><span class="p">,</span> <span class="n">strides_i</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">begin_v</span><span class="p">,</span> <span class="n">end_v</span><span class="p">,</span> <span class="n">strides_v</span><span class="p">):</span>
                <span class="n">s</span> <span class="o">=</span> <span class="nb">slice</span><span class="p">(</span><span class="n">begin_i</span><span class="p">,</span> <span class="n">end_i</span><span class="p">,</span> <span class="n">strides_i</span><span class="p">)</span>
                <span class="n">slice_index</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">s</span><span class="p">)</span>
            <span class="n">slice_index</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">slice_index</span><span class="p">)</span>
            <span class="n">max_value_slice</span> <span class="o">=</span> <span class="n">max_value_np</span><span class="p">[</span><span class="n">slice_index</span><span class="p">]</span>
            <span class="n">min_value_slice</span> <span class="o">=</span> <span class="n">min_value_np</span><span class="p">[</span><span class="n">slice_index</span><span class="p">]</span>
            <span class="n">max_value_slice</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">max_value_slice</span><span class="o">.</span><span class="n">tolist</span><span class="p">())</span>
            <span class="n">min_value_slice</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">min_value_slice</span><span class="o">.</span><span class="n">tolist</span><span class="p">())</span>
            <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">ret_shape</span><span class="p">,</span>
                    <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                    <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">,</span>
                    <span class="s1">&#39;max_value&#39;</span><span class="p">:</span> <span class="n">max_value_slice</span><span class="p">,</span>
                    <span class="s1">&#39;min_value&#39;</span><span class="p">:</span> <span class="n">min_value_slice</span><span class="p">}</span>

        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">ret_shape</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="n">value</span><span class="p">}</span>

    <span class="k">def</span> <span class="nf">_compute_slicing_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">begin_v</span><span class="p">,</span> <span class="n">end_v</span><span class="p">,</span> <span class="n">strides_v</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Computes the shape of the slicing.&quot;&quot;&quot;</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">slice_len</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">begin_v</span><span class="p">)</span>

        <span class="c1"># After the integer is converted to binary, it is a str and the first two chars are the flag char &#39;0b&#39;.</span>
        <span class="n">begin_pos</span> <span class="o">=</span> <span class="nb">bin</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">begin_mask</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">end_pos</span> <span class="o">=</span> <span class="nb">bin</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">end_mask</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">ellipsis_pos</span> <span class="o">=</span> <span class="nb">bin</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">ellipsis_mask</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">new_axis_pos</span> <span class="o">=</span> <span class="nb">bin</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">new_axis_mask</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
        <span class="n">shrink_axis_pos</span> <span class="o">=</span> <span class="nb">bin</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">shrink_axis_mask</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">:</span><span class="mi">1</span><span class="p">:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>

        <span class="n">ret_shape</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">i</span><span class="p">,</span> <span class="n">j</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span>
        <span class="n">has_ellipsis</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="k">while</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">x_rank</span> <span class="ow">or</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="n">slice_len</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="n">slice_len</span><span class="p">:</span>
                <span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span> <span class="o">=</span> <span class="n">begin_v</span><span class="p">[</span><span class="n">j</span><span class="p">],</span> <span class="n">end_v</span><span class="p">[</span><span class="n">j</span><span class="p">],</span> <span class="n">strides_v</span><span class="p">[</span><span class="n">j</span><span class="p">]</span>

                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">ellipsis_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">ellipsis_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="c1"># When there is ellipsis, the latter part of the ellipsis will be processed separately.</span>
                    <span class="n">has_ellipsis</span> <span class="o">=</span> <span class="kc">True</span>
                    <span class="k">break</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">begin_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">begin_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="n">begin</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span> <span class="k">if</span> <span class="n">strides_v</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="k">else</span> <span class="mi">0</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">end_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">end_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="n">end</span> <span class="o">=</span> <span class="o">-</span><span class="p">(</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">+</span> <span class="mi">1</span><span class="p">)</span> <span class="k">if</span> <span class="n">strides_v</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="k">else</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">new_axis_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">new_axis_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="n">ret_shape</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
                    <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
                    <span class="k">continue</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">shrink_axis_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">shrink_axis_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="k">if</span> <span class="p">(</span><span class="ow">not</span> <span class="o">-</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">&lt;=</span> <span class="n">begin</span> <span class="o">&lt;</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">])</span> <span class="ow">or</span> <span class="n">stride</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
                        <span class="k">raise</span> <span class="ne">IndexError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the &#39;strides[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39; cannot be negative number and &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;&#39;begin[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39; should be in [-</span><span class="si">{</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="si">}</span><span class="s2">, </span><span class="si">{</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="si">}</span><span class="s2">) &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;when &#39;shrink_axis_mask&#39; is greater than 0, &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;but got &#39;shrink_axis_mask&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">shrink_axis_mask</span><span class="si">}</span><span class="s2">, &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;&#39;strides[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39;: </span><span class="si">{</span><span class="n">stride</span><span class="si">}</span><span class="s2">, &#39;begin[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39;: </span><span class="si">{</span><span class="n">begin</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
                    <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
                    <span class="n">i</span> <span class="o">+=</span> <span class="mi">1</span>
                    <span class="k">continue</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="mi">1</span>

            <span class="n">slicing_length</span> <span class="o">=</span> <span class="n">_compute_slicing_length</span><span class="p">(</span><span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">i</span><span class="p">)</span>
            <span class="n">ret_shape</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">slicing_length</span><span class="p">)</span>
            <span class="n">i</span> <span class="o">+=</span> <span class="mi">1</span>
            <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
        <span class="k">if</span> <span class="n">has_ellipsis</span><span class="p">:</span>
            <span class="c1"># When there is ellipsis, handle the second half of the ellipsis split.</span>
            <span class="n">ellipsis_occupied_dims</span> <span class="o">=</span> <span class="n">x_rank</span> <span class="o">-</span> <span class="n">i</span> <span class="o">-</span> <span class="p">(</span><span class="n">slice_len</span> <span class="o">-</span> <span class="p">(</span><span class="n">j</span> <span class="o">+</span> <span class="mi">1</span><span class="p">))</span> <span class="o">+</span> \
                                     <span class="nb">len</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">x</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">,</span> <span class="n">new_axis_pos</span><span class="p">[</span><span class="n">j</span> <span class="o">+</span> <span class="mi">1</span><span class="p">:</span><span class="n">slice_len</span><span class="p">])))</span>
            <span class="n">ret_shape</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">:</span><span class="n">i</span> <span class="o">+</span> <span class="n">ellipsis_occupied_dims</span><span class="p">])</span>
            <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
            <span class="n">i</span> <span class="o">+=</span> <span class="n">ellipsis_occupied_dims</span>

            <span class="k">while</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">x_rank</span> <span class="ow">or</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="n">slice_len</span><span class="p">:</span>
                <span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span> <span class="o">=</span> <span class="n">begin_v</span><span class="p">[</span><span class="n">j</span><span class="p">],</span> <span class="n">end_v</span><span class="p">[</span><span class="n">j</span><span class="p">],</span> <span class="n">strides_v</span><span class="p">[</span><span class="n">j</span><span class="p">]</span>

                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">begin_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">begin_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="n">begin</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span> <span class="k">if</span> <span class="n">strides_v</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="k">else</span> <span class="mi">0</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">end_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">end_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="n">end</span> <span class="o">=</span> <span class="o">-</span><span class="p">(</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">+</span> <span class="mi">1</span><span class="p">)</span> <span class="k">if</span> <span class="n">strides_v</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="k">else</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">new_axis_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">new_axis_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="n">ret_shape</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
                    <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
                    <span class="k">continue</span>
                <span class="k">if</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">shrink_axis_pos</span><span class="p">)</span> <span class="ow">and</span> <span class="n">shrink_axis_pos</span><span class="p">[</span><span class="n">j</span><span class="p">]</span> <span class="o">==</span> <span class="s1">&#39;1&#39;</span><span class="p">:</span>
                    <span class="k">if</span> <span class="p">(</span><span class="ow">not</span> <span class="o">-</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">&lt;=</span> <span class="n">begin</span> <span class="o">&lt;</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">])</span> <span class="ow">or</span> <span class="n">stride</span> <span class="o">&lt;</span> <span class="mi">0</span><span class="p">:</span>
                        <span class="k">raise</span> <span class="ne">IndexError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the &#39;strides[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39; cannot be negative number and &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;&#39;begin[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39; should be in [-</span><span class="si">{</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="si">}</span><span class="s2">, </span><span class="si">{</span><span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="si">}</span><span class="s2">) &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;when &#39;shrink_axis_mask&#39; is greater than 0, &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;but got &#39;shrink_axis_mask&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">shrink_axis_mask</span><span class="si">}</span><span class="s2">, &quot;</span>
                                         <span class="sa">f</span><span class="s2">&quot;&#39;strides[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39;: </span><span class="si">{</span><span class="n">stride</span><span class="si">}</span><span class="s2">, &#39;begin[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">]&#39;: </span><span class="si">{</span><span class="n">begin</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
                    <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
                    <span class="n">i</span> <span class="o">+=</span> <span class="mi">1</span>
                    <span class="k">continue</span>

                <span class="n">slicing_length</span> <span class="o">=</span> <span class="n">_compute_slicing_length</span><span class="p">(</span><span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">i</span><span class="p">)</span>
                <span class="n">ret_shape</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">slicing_length</span><span class="p">)</span>
                <span class="n">i</span> <span class="o">+=</span> <span class="mi">1</span>
                <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
        <span class="k">return</span> <span class="n">ret_shape</span>

    <span class="k">def</span> <span class="nf">_compute_dynamic_slicing_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">slice_len</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Computes the shape of the slicing for dynamic shape, mask is currently not supported.&quot;&quot;&quot;</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">begin_mask</span> <span class="o">!=</span> <span class="mi">0</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">end_mask</span> <span class="o">!=</span> <span class="mi">0</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">ellipsis_mask</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">new_axis_mask</span> <span class="o">!=</span> <span class="mi">0</span> \
            <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">shrink_axis_mask</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;Mask is currently not supported if &#39;begin&#39;, &#39;end&#39; or &#39;strides&#39; is not a constant.&quot;</span><span class="p">)</span>
        <span class="n">ret_shape</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">i</span><span class="p">,</span> <span class="n">j</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span>
        <span class="k">while</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">x_rank</span> <span class="ow">or</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="n">slice_len</span><span class="p">:</span>
            <span class="n">slicing_length</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span>
            <span class="k">if</span> <span class="n">j</span> <span class="o">&gt;=</span> <span class="n">slice_len</span><span class="p">:</span>
                <span class="k">if</span> <span class="n">i</span> <span class="o">&gt;=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">):</span>
                    <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;StridedSlice&#39;, the index must be less than or equal to &quot;</span>
                                     <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the dimension of &#39;input_x&#39;: </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2"> &quot;</span>
                                     <span class="sa">f</span><span class="s2">&quot;and the index: </span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
                <span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="mi">1</span>
                <span class="k">if</span> <span class="n">end</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
                    <span class="n">slicing_length</span> <span class="o">=</span> <span class="n">_compute_slicing_length</span><span class="p">(</span><span class="n">begin</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">stride</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">i</span><span class="p">)</span>
            <span class="n">ret_shape</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">slicing_length</span><span class="p">)</span>
            <span class="n">i</span> <span class="o">+=</span> <span class="mi">1</span>
            <span class="n">j</span> <span class="o">+=</span> <span class="mi">1</span>
        <span class="k">return</span> <span class="n">ret_shape</span></div>


<span class="k">class</span> <span class="nc">Diag</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>

<span class="sd">    Constructs a diagonal tensor with given diagonal values.</span>

<span class="sd">    Assume `input_x` has dimensions :math:`[D_1,... D_k]`, the output is a tensor of</span>
<span class="sd">    rank 2k with dimensions :math:`[D_1,..., D_k, D_1,..., D_k]` where:</span>
<span class="sd">    :math:`output[i_1,..., i_k, i_1,..., i_k] = input_x[i_1,..., i_k]` and 0 everywhere else.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. The rank of the input tensor must be less than 5.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same dtype as the `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>
<span class="sd">        ValueError: If rank of `input_x` is less than 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor([1, 2, 3, 4])</span>
<span class="sd">        &gt;&gt;&gt; diag = ops.Diag()</span>
<span class="sd">        &gt;&gt;&gt; output = diag(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1, 0, 0, 0],</span>
<span class="sd">         [0, 2, 0, 0],</span>
<span class="sd">         [0, 0, 3, 0],</span>
<span class="sd">         [0, 0, 0, 4]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Diag&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_type</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_type</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;x rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">)</span>
        <span class="n">ret_shape</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">ret_shape</span> <span class="o">=</span> <span class="n">ret_shape</span> <span class="o">+</span> <span class="n">ret_shape</span>
        <span class="k">return</span> <span class="n">ret_shape</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">x</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="c1"># do constant-folding only when x rank is 1</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">1</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">diag</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">())</span>
        <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">ret</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">DiagPart</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>

<span class="sd">    Extracts the diagonal part from given tensor.</span>

<span class="sd">    Assume input has dimensions :math:`[D_1,..., D_k, D_1,..., D_k]`, the output is a tensor</span>
<span class="sd">    of rank k with dimensions :math:`[D_1,..., D_k]` where:</span>
<span class="sd">    :math:`output[i_1,..., i_k] = input[i_1,..., i_k, i_1,..., i_k]`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor of rank 2k, where k is not zero.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the extracted diagonal has the same dtype as the `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>
<span class="sd">        ValueError: If rank of `input_x` is not even or zero.</span>
<span class="sd">        ValueError: If input_shape[i] is not equal to input_shape[i + len(input_shape)/2].</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor([[1, 0, 0, 0],</span>
<span class="sd">        ...                   [0, 2, 0, 0],</span>
<span class="sd">        ...                   [0, 0, 3, 0],</span>
<span class="sd">        ...                   [0, 0, 0, 4]])</span>
<span class="sd">        &gt;&gt;&gt; diag_part = ops.DiagPart()</span>
<span class="sd">        &gt;&gt;&gt; output = diag_part(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [1 2 3 4]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize DiagPart&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_type</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_type</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span> <span class="o">%</span> <span class="mi">2</span> <span class="o">!=</span> <span class="mi">0</span> <span class="ow">or</span> \
                <span class="ow">not</span> <span class="n">x_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s2">, the dimension of &#39;input_x&#39; must be non-zero and even, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got dimension </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">, with shapes </span><span class="si">{</span><span class="n">x_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">length</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span> <span class="o">//</span> <span class="mi">2</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">length</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;input_shape[i + len(input_shape)/2]&#39;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="n">length</span><span class="p">],</span>
                            <span class="s1">&#39;input_shape[i]&#39;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">ret_shape</span> <span class="o">=</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="n">length</span><span class="p">]</span>
        <span class="k">return</span> <span class="n">ret_shape</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">x</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="c1"># do constant-folding only when x rank is 2</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">None</span>
        <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">diag</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">())</span>
        <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">ret</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">Eye</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>

<span class="sd">    Creates a tensor with ones on the diagonal and zeros in the rest.</span>

<span class="sd">    Note:</span>
<span class="sd">        Combines ReverseV2 operator to get an anti-diagonal Tensor,</span>
<span class="sd">        but ReverseV2 only supports Ascend and GPU platforms currently.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **n** (int) - The number of rows of returned tensor. Constant value only.</span>
<span class="sd">        - **m** (int) - The number of columns of returned tensor. Constant value only.</span>
<span class="sd">        - **t** (mindspore.dtype) - MindSpore&#39;s dtype, the data type of the returned tensor.</span>
<span class="sd">          The data type can be Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a tensor with ones on the diagonal and zeros elsewhere. The shape of `output` depends on</span>
<span class="sd">        the user&#39;s inputs `n` and `m`, and the data type depends on the input `t`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `m` or `n` is not an int.</span>
<span class="sd">        ValueError: If `m` or `n` is less than 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; eye = ops.Eye()</span>
<span class="sd">        &gt;&gt;&gt; output = eye(2, 2, mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1 0]</span>
<span class="sd">         [0 1]]</span>
<span class="sd">        &gt;&gt;&gt; print(output.dtype)</span>
<span class="sd">        Int32</span>
<span class="sd">        &gt;&gt;&gt; output = eye(1, 2, mindspore.float64)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 0.]]</span>
<span class="sd">        &gt;&gt;&gt; print(output.dtype)</span>
<span class="sd">        Float64</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Eye&quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">t</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="s2">&quot;n&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="s2">&quot;m&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;dtype&quot;</span><span class="p">:</span> <span class="n">t</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_types_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span> <span class="o">+</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">np_type</span> <span class="o">=</span> <span class="n">mstype</span><span class="o">.</span><span class="n">dtype_to_nptype</span><span class="p">(</span><span class="n">t</span><span class="p">)</span>
        <span class="n">ret</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">eye</span><span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="n">m</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np_type</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">ret</span><span class="p">)</span>


<div class="viewcode-block" id="ScatterNd"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterNd.html#mindspore.ops.ScatterNd">[docs]</a><span class="k">class</span> <span class="nc">ScatterNd</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Scatters a tensor into a new tensor depending on the specified indices.</span>

<span class="sd">    Creates an empty tensor with the given `shape`, and set values by scattering the update tensor</span>
<span class="sd">    depending on indices.</span>

<span class="sd">    The empty tensor has rank P and `indices` has rank Q where `Q &gt;= 2`.</span>

<span class="sd">    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N &lt;= P`.</span>

<span class="sd">    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of the empty tensor.</span>

<span class="sd">    `updates` is a tensor of rank `Q-1+P-N`. Its shape is: :math:`(i_0, i_1, ..., i_{Q-2}, shape_N, ..., shape_{P-1})`.</span>

<span class="sd">    The following figure shows the calculation process of inserting two slices in the first dimension of a rank-3</span>
<span class="sd">    tensor with two matrices of new values:</span>

<span class="sd">    .. image:: api_img/ScatterNd.png</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **indices** (Tensor) - The index of scattering in the new tensor with int32 or int64 data type.</span>
<span class="sd">          The rank of indices must be at least 2 and `indices_shape[-1] &lt;= len(shape)`.</span>
<span class="sd">        - **updates** (Tensor) - The source Tensor to be scattered.</span>
<span class="sd">          It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`.</span>
<span class="sd">        - **shape** (tuple[int]) - Define the shape of the output tensor, has the same data type as indices.</span>
<span class="sd">          The shape of `shape` is :math:`(x_1, x_2, ..., x_R)`, and the length of &#39;shape&#39; is greater than or equal to 2.</span>
<span class="sd">          In other words, the shape of `shape` is at least :math:`(x_1, x_2)`.</span>
<span class="sd">          And the value of any element in `shape` must be greater than or equal to 1.</span>
<span class="sd">          In other words, :math:`x_1` &gt;= 1, :math:`x_2` &gt;= 1.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the new tensor, has the same type as `updates` and the same shape as `shape`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `shape` is not a tuple.</span>
<span class="sd">        ValueError: If any element of `shape` is less than 1.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScatterNd()</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0], [2]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],</span>
<span class="sd">        ...                             [3, 3, 3, 3], [4, 4, 4, 4]],</span>
<span class="sd">        ...                            [[1, 1, 1, 1], [2, 2, 2, 2],</span>
<span class="sd">        ...                             [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; shape = (4, 4, 4)</span>
<span class="sd">        &gt;&gt;&gt; output = op(indices, updates, shape)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[1. 1. 1. 1.]</span>
<span class="sd">          [2. 2. 2. 2.]</span>
<span class="sd">          [3. 3. 3. 3.]</span>
<span class="sd">          [4. 4. 4. 4.]]</span>
<span class="sd">         [[0. 0. 0. 0.]</span>
<span class="sd">          [0. 0. 0. 0.]</span>
<span class="sd">          [0. 0. 0. 0.]</span>
<span class="sd">          [0. 0. 0. 0.]]</span>
<span class="sd">         [[1. 1. 1. 1.]</span>
<span class="sd">          [2. 2. 2. 2.]</span>
<span class="sd">          [3. 3. 3. 3.]</span>
<span class="sd">          [4. 4. 4. 4.]]</span>
<span class="sd">         [[0. 0. 0. 0.]</span>
<span class="sd">          [0. 0. 0. 0.]</span>
<span class="sd">          [0. 0. 0. 0.]</span>
<span class="sd">          [0. 0. 0. 0.]]]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; shape = (3, 3)</span>
<span class="sd">        &gt;&gt;&gt; output = op(indices, updates, shape)</span>
<span class="sd">        &gt;&gt;&gt; # In order to facilitate understanding, explain the operator pseudo-operation process step by step:</span>
<span class="sd">        &gt;&gt;&gt; # Step 1: Generate an empty Tensor of the specified shape according to the shape</span>
<span class="sd">        &gt;&gt;&gt; # [</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 0. 0.]</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 0. 0.]</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 0. 0.]</span>
<span class="sd">        &gt;&gt;&gt; # ]</span>
<span class="sd">        &gt;&gt;&gt; # Step 2: Modify the data at the specified location according to the indicators</span>
<span class="sd">        &gt;&gt;&gt; # 0th row of indices is [0, 1], 0th row of updates is 3.2.</span>
<span class="sd">        &gt;&gt;&gt; # means that the empty tensor in the 0th row and 1st col set to 3.2</span>
<span class="sd">        &gt;&gt;&gt; # [</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 3.2. 0.]</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 0.   0.]</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 0.   0.]</span>
<span class="sd">        &gt;&gt;&gt; # ]</span>
<span class="sd">        &gt;&gt;&gt; # 1th row of indices is [1, 1], 1th row of updates is 1.1.</span>
<span class="sd">        &gt;&gt;&gt; # means that the empty tensor in the 1th row and 1st col set to 1.1</span>
<span class="sd">        &gt;&gt;&gt; # [</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 3.2. 0.]</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 1.1  0.]</span>
<span class="sd">        &gt;&gt;&gt; #     [0. 0.   0.]</span>
<span class="sd">        &gt;&gt;&gt; # ]</span>
<span class="sd">        &gt;&gt;&gt; # The final result is as follows:</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 3.2 0.]</span>
<span class="sd">         [0. 1.1 0.]</span>
<span class="sd">         [0. 0.  0.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterNd&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;update&#39;</span><span class="p">,</span> <span class="s1">&#39;shape&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">update</span><span class="p">,</span> <span class="n">shape</span><span class="p">):</span>
        <span class="n">shp</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;update_dtype&quot;</span><span class="p">,</span> <span class="n">update</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;indices&quot;</span><span class="p">,</span> <span class="n">indices</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">shp</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">x</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">shp</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;shape[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">]&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="n">indices_shape</span><span class="p">,</span> <span class="n">update_shape</span> <span class="o">=</span> <span class="n">indices</span><span class="p">[</span><span class="s2">&quot;shape&quot;</span><span class="p">],</span> <span class="n">update</span><span class="p">[</span><span class="s2">&quot;shape&quot;</span><span class="p">]</span>
        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">!=</span> <span class="n">update_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the first shape of &#39;indices&#39; must be the same as the first shape of &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;&#39;updates&#39;, but got the first shape of &#39;indices&#39;: </span><span class="si">{</span><span class="n">indices_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the first shape of &#39;updates&#39;: </span><span class="si">{</span><span class="n">update_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">shp</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">update</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span></div>


<div class="viewcode-block" id="ResizeNearestNeighbor"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ResizeNearestNeighbor.html#mindspore.ops.ResizeNearestNeighbor">[docs]</a><span class="k">class</span> <span class="nc">ResizeNearestNeighbor</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Resizes the input tensor by using the nearest neighbor algorithm.</span>

<span class="sd">    Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest</span>
<span class="sd">    neighbor algorithm selects the value of the nearest point and does not consider the</span>
<span class="sd">    values of neighboring points at all, yielding a piecewise-constant interpolant.</span>

<span class="sd">    Args:</span>
<span class="sd">        size (Union[tuple, list]): The target size. The dimension of size must be 2.</span>
<span class="sd">        align_corners (bool): Whether the centers of the 4 corner pixels of the input</span>
<span class="sd">                              and output tensors are aligned. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of the output tensor is  :math:`(N, C, NEW\_H, NEW\_W)`.</span>
<span class="sd">        The data type is the same as the `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `size` is neither tuple nor list.</span>
<span class="sd">        TypeError: If `align_corners` is not a bool.</span>
<span class="sd">        ValueError: If length of `size` is not equal to 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; resize = ops.ResizeNearestNeighbor((2, 2))</span>
<span class="sd">        &gt;&gt;&gt; output = resize(input_tensor)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[[-0.1  0.3]</span>
<span class="sd">           [ 0.4  0.5]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="n">align_corners</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ResizeNearestNeighbor&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;size&quot;</span><span class="p">,</span> <span class="n">size</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;align_corners&quot;</span><span class="p">,</span> <span class="n">align_corners</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">size</span><span class="p">),</span> <span class="mi">2</span><span class="p">,</span> <span class="s2">&quot;length of size&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">size</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">value</span><span class="p">,</span> <span class="sa">f</span><span class="s1">&#39;</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s1">th value of size&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;image_in&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;image_out&#39;</span><span class="p">])</span></div>


<span class="k">class</span> <span class="nc">GatherNd</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Gathers slices from a tensor by indices.</span>

<span class="sd">    Using given indices to gather slices from a tensor with a specified shape.</span>

<span class="sd">    `indices` is a K-dimensional integer tensor. Suppose it is a (K-1)-dimensional tensor and each element of it</span>
<span class="sd">    defines a slice of `input_x`:</span>

<span class="sd">    .. math::</span>
<span class="sd">        output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]</span>

<span class="sd">    The last dimension of `indices` cannot exceed the rank of `input_x`:</span>
<span class="sd">    :math:`indices.shape[-1] &lt;= input\_x.rank`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor to gather values.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index tensor, with int32 or int64 data type.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; op = ops.GatherNd()</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [-0.1  0.5]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize GatherNd&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>



<div class="viewcode-block" id="TensorScatterUpdate"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.TensorScatterUpdate.html#mindspore.ops.TensorScatterUpdate">[docs]</a><span class="k">class</span> <span class="nc">TensorScatterUpdate</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a new tensor by updating the positions in `input_x` indicated by</span>
<span class="sd">    `indices`, with values from `update`. This operation is almost equivalent to using</span>
<span class="sd">    ScatterNd, except that the updates are applied on `input_x` instead of a zero tensor.</span>

<span class="sd">    `indices` must have rank at least 2, the last axis is the depth of each index</span>
<span class="sd">    vectors. For each index vector, there must be a corresponding value in `update`. If</span>
<span class="sd">    the depth of each index tensor matches the rank of `input_x`, then each index</span>
<span class="sd">    vector corresponds to a scalar in `input_x` and each `update` updates a scalar. If</span>
<span class="sd">    the depth of each index tensor is less than the rank of `input_x`, then each index</span>
<span class="sd">    vector corresponds to a slice in `input_x`, and each `update` updates a slice.</span>

<span class="sd">    The order in which updates are applied is nondeterministic, meaning that if there</span>
<span class="sd">    are multiple index vectors in `indices` that correspond to the same position, the</span>
<span class="sd">    value of that position in the output will be nondeterministic.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>
<span class="sd">          The data type is Number.</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.</span>
<span class="sd">          The rank must be at least 2.</span>
<span class="sd">        - **update** (Tensor) - The tensor to update the input tensor, has the same type as input, and</span>

<span class="sd">          :math:`update.shape = indices.shape[:-1]+input_x.shape[indices.shape[-1]:]`</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.</span>
<span class="sd">        ValueError: If the value of `input_x` does not match the input `indices`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; update = Tensor(np.array([1.0, 2.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.TensorScatterUpdate()</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, update)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 1.   0.3  3.6]</span>
<span class="sd">         [ 0.4  2.2 -3.2]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;indices&#39; cannot be less than 2,&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">&gt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the last dimension of &#39;indices&#39; must be less than or equal to &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;last dimension of &#39;indices&#39;: </span><span class="si">{</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="si">}</span><span class="s2"> and the dimension of &#39;input_x&#39;: &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="n">updates_shape_check</span> <span class="o">=</span> <span class="n">indices_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">input_x_shape</span><span class="p">[</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:]</span>
        <span class="k">if</span> <span class="n">updates_shape_check</span> <span class="o">!=</span> <span class="n">updates_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;update&#39; must be equal to updates_shape_check, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got the shape of &#39;update&#39;: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape_check: </span><span class="si">{</span><span class="n">updates_shape_check</span><span class="si">}</span><span class="s2">, indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2"> and &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;input_x_shape: </span><span class="si">{</span><span class="n">input_x_shape</span><span class="si">}</span><span class="s2">. Please check input_x_shape and indices_shape.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="n">input_x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;input_x&quot;</span><span class="p">:</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,)</span> <span class="o">+</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">input_x_dtype</span></div>


<div class="viewcode-block" id="TensorScatterAdd"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.TensorScatterAdd.html#mindspore.ops.TensorScatterAdd">[docs]</a><span class="k">class</span> <span class="nc">TensorScatterAdd</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a new tensor by adding the values from the positions in `input_x` indicated by</span>
<span class="sd">    `indices`, with values from `updates`. When multiple values are given for the same</span>
<span class="sd">    index, the updated result will be the sum of all values. This operation is almost</span>
<span class="sd">    equivalent to using ScatterNdAdd, except that the updates are applied on `Tensor`</span>
<span class="sd">    instead of `Parameter`.</span>

<span class="sd">    The last axis of `indices` is the depth of each index vector. For each index vector,</span>
<span class="sd">    there must be a corresponding value in `updates`. The shape of `updates` should be</span>
<span class="sd">    equal to the shape of `input_x[indices]`. For more details, see use cases.</span>

<span class="sd">    Note:</span>
<span class="sd">        If some values of the `indices` are out of bound, instead of raising an index error,</span>
<span class="sd">        the corresponding `updates` will not be updated to `input_x`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.</span>
<span class="sd">          The rank must be at least 2.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,</span>
<span class="sd">          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; # Next, demonstrate the approximate operation process of this operator:</span>
<span class="sd">        &gt;&gt;&gt; # 1, indices[0] = [0, 0], indices[1] = [0, 0]</span>
<span class="sd">        &gt;&gt;&gt; # 2, And input_x[0, 0] = -0.1</span>
<span class="sd">        &gt;&gt;&gt; # 3, So input_x[indices] = [-0.1, -0.1]</span>
<span class="sd">        &gt;&gt;&gt; # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.TensorScatterAdd()</span>
<span class="sd">        &gt;&gt;&gt; # 5, Perform the addition operation for the first time:</span>
<span class="sd">        &gt;&gt;&gt; #      first_input_x = input_x[0][0] + updates[0] = [[0.9, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; # 6, Perform the addition operation for the second time:</span>
<span class="sd">        &gt;&gt;&gt; #      second_input_x = input_x[0][0] + updates[1] = [[3.1, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 3.1  0.3  3.6]</span>
<span class="sd">         [ 0.4  0.5 -3.2]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;indices&#39; cannot be less than 2,&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">&gt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the last dimension of &#39;indices&#39; must be less than or equal to &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;last dimension of &#39;indices&#39;: </span><span class="si">{</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="si">}</span><span class="s2"> and the dimension of &#39;input_x&#39;: &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="n">updates_shape_check</span> <span class="o">=</span> <span class="n">indices_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">input_x_shape</span><span class="p">[</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:]</span>
        <span class="k">if</span> <span class="n">updates_shape_check</span> <span class="o">!=</span> <span class="n">updates_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;update&#39; must be equal to updates_shape_check, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got the shape of &#39;update&#39;: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape_check: </span><span class="si">{</span><span class="n">updates_shape_check</span><span class="si">}</span><span class="s2">, indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2"> and &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;input_x_shape: </span><span class="si">{</span><span class="n">input_x_shape</span><span class="si">}</span><span class="s2">. Please check input_x_shape and indices_shape.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="n">input_x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;input_x&quot;</span><span class="p">:</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,)</span> <span class="o">+</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">input_x_dtype</span></div>


<div class="viewcode-block" id="ScatterUpdate"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterUpdate.html#mindspore.ops.ScatterUpdate">[docs]</a><span class="k">class</span> <span class="nc">ScatterUpdate</span><span class="p">(</span><span class="n">_ScatterOpDynamic</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates tensor values by using input indices and value.</span>

<span class="sd">    Using given values to update tensor value, along with the input indices.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :] = \text{updates}[i, ..., j, :]</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: True.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor. With int32 data type.</span>
<span class="sd">          If there are duplicates in indices, the order for updating is undefined.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,</span>
<span class="sd">          and updates.shape = indices.shape + input_x.shape[1:].</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])</span>
<span class="sd">        &gt;&gt;&gt; input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([0, 1]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; np_updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]])</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np_updates, mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScatterUpdate()</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[2. 1.2  1.]</span>
<span class="sd">         [3. 1.2  1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterUpdate&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span></div>


<div class="viewcode-block" id="ScatterNdUpdate"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterNdUpdate.html#mindspore.ops.ScatterNdUpdate">[docs]</a><span class="k">class</span> <span class="nc">ScatterNdUpdate</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates tensor values by using input indices and value.</span>

<span class="sd">    Using given values to update tensor value, along with the input indices.</span>

<span class="sd">    `input_x` has rank P and `indices` has rank Q where `Q &gt;= 2`.</span>

<span class="sd">    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N &lt;= P`.</span>

<span class="sd">    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.</span>

<span class="sd">    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:</span>
<span class="sd">    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: True.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor, with int32 data type.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to be updated to the input tensor, has the same type as input.</span>
<span class="sd">          The shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])</span>
<span class="sd">        &gt;&gt;&gt; input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScatterNdUpdate()</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1.   0.3   3.6]</span>
<span class="sd">         [0.4  2.2  -3.2]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="n">__mindspore_signature__</span> <span class="o">=</span> <span class="p">(</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">sig</span><span class="o">.</span><span class="n">sig_rw</span><span class="o">.</span><span class="n">RW_WRITE</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T1</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;updates&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
    <span class="p">)</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterNdUpdate&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span></div>

<div class="viewcode-block" id="ScatterMax"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterMax.html#mindspore.ops.ScatterMax">[docs]</a><span class="k">class</span> <span class="nc">ScatterMax</span><span class="p">(</span><span class="n">_ScatterOp</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates the value of the input tensor through the maximum operation.</span>

<span class="sd">    Using given values to update tensor value through the max operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :]</span>
<span class="sd">        = max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: True.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do max operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor that performs the maximum operation with `input_x`,</span>
<span class="sd">          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32),</span>
<span class="sd">        ...                     name=&quot;input_x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_max = ops.ScatterMax()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_max(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[88. 88. 88.]</span>
<span class="sd">         [88. 88. 88.]]</span>
<span class="sd">    &quot;&quot;&quot;</span></div>


<div class="viewcode-block" id="ScatterMin"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterMin.html#mindspore.ops.ScatterMin">[docs]</a><span class="k">class</span> <span class="nc">ScatterMin</span><span class="p">(</span><span class="n">_ScatterOp</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates the value of the input tensor through the minimum operation.</span>

<span class="sd">    Using given values to update tensor value through the min operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :]</span>
<span class="sd">        = min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do min operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor doing the min operation with `input_x`,</span>
<span class="sd">          the data type is same as `input_x`, the shape is `indices_shape + x_shape[1:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32),</span>
<span class="sd">        ...                     name=&quot;input_x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; update = Tensor(np.ones([2, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_min = ops.ScatterMin()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_min(input_x, indices, update)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0. 1. 1.]</span>
<span class="sd">         [0. 0. 0.]]</span>
<span class="sd">    &quot;&quot;&quot;</span></div>


<div class="viewcode-block" id="ScatterAdd"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterAdd.html#mindspore.ops.ScatterAdd">[docs]</a><span class="k">class</span> <span class="nc">ScatterAdd</span><span class="p">(</span><span class="n">_ScatterOpDynamic</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates the value of the input tensor through the addition operation.</span>

<span class="sd">    Using given values to update tensor value through the add operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{+}= \text{updates}[i, ..., j, :]</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Note:</span>
<span class="sd">        This is an in-place update operator. Therefore, the `input_x` will be updated after the operation is completed.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor doing the add operation with `input_x`,</span>
<span class="sd">          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_add = ops.ScatterAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1. 1.]</span>
<span class="sd">         [3. 3. 3.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [3.0, 3.0, 3.0] + [7.0, 7.0, 7.0] = [10.0, 10.0, 10.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [10.0, 10.0, 10.0] + [9.0, 9.0, 9.0] = [19.0, 19.0, 19.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_add = ops.ScatterAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 1.  1.  1.]</span>
<span class="sd">         [19. 19. 19.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[1, 0], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [1, 0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [1.0, 1.0, 1.0] + [7.0, 7.0, 7.0] = [8.0, 8.0, 8.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [8.0, 8.0, 8.0] + [9.0, 9.0, 9.0] = [17.0, 17.0, 17.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_add = ops.ScatterAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 3.  3.  3.]</span>
<span class="sd">         [17. 17. 17.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [0, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [1.0, 1.0, 1.0] + [7.0, 7.0, 7.0] = [8.0, 8.0, 8.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [3.0, 3.0, 3.0] + [9.0, 9.0, 9.0] = [12.0, 12.0, 12.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_add = ops.ScatterAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 8.  8.  8.]</span>
<span class="sd">         [12. 12. 12.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterAdd&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span></div>


<div class="viewcode-block" id="ScatterSub"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterSub.html#mindspore.ops.ScatterSub">[docs]</a><span class="k">class</span> <span class="nc">ScatterSub</span><span class="p">(</span><span class="n">_ScatterOpDynamic</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates the value of the input tensor through the subtraction operation.</span>

<span class="sd">    Using given values to update tensor value through the subtraction operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{-}= \text{updates}[i, ..., j, :]</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do sub operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor doing the sub operation with `input_x`,</span>
<span class="sd">          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_sub = ops.ScatterSub()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_sub(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[-1. -1. -1.]</span>
<span class="sd">         [-1. -1. -1.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [-3.0, -3.0, -3.0] - [7.0, 7.0, 7.0] = [-10.0, -10.0, -10.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [-10.0, -10.0, -10.0] - [9.0, 9.0, 9.0] = [-19.0, -19.0, -19.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_sub = ops.ScatterSub()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_sub(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ -1.  -1.  -1.]</span>
<span class="sd">         [-19. -19. -19.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[1, 0], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [1, 0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [-1.0, -1.0, -1.0] - [7.0, 7.0, 7.0] = [-8.0, -8.0, -8.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [-8.0, -8.0, -8.0] - [9.0, 9.0, 9.0] = [-17.0, -17.0, -17.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_sub = ops.ScatterSub()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_sub(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ -3.  -3.  -3.]</span>
<span class="sd">         [-17. -17. -17.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [0, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [-1.0, -1.0, -1.0] - [7.0, 7.0, 7.0] = [-8.0, -8.0, -8.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [-3.0, -3.0, -3.0] - [9.0, 9.0, 9.0] = [-12.0, -12.0, -12.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_sub = ops.ScatterSub()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_sub(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ -8.  -8.  -8.]</span>
<span class="sd">         [-12. -12. -12.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterSub&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span></div>


<div class="viewcode-block" id="ScatterMul"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterMul.html#mindspore.ops.ScatterMul">[docs]</a><span class="k">class</span> <span class="nc">ScatterMul</span><span class="p">(</span><span class="n">_ScatterOp</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates the value of the input tensor through the multiply operation.</span>

<span class="sd">    Using given values to update tensor value through the mul operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{*}= \text{updates}[i, ..., j, :]</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do mul operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor doing the mul operation with `input_x`,</span>
<span class="sd">          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([0, 1]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_mul = ops.ScatterMul()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_mul(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[2. 2. 2.]</span>
<span class="sd">         [4. 4. 4.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [6.0, 6.0, 6.0] * [7.0, 7.0, 7.0] = [42.0, 42.0, 42.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [42.0, 42.0, 42.0] * [9.0, 9.0, 9.0] = [378.0, 378.0, 378.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_mul = ops.ScatterMul()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_mul(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[  1.   1.   1.]</span>
<span class="sd">         [378. 378. 378.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[1, 0], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [1, 0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [1.0, 1.0, 1.0] * [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [2.0, 2.0, 2.0] * [1.0, 1.0, 1.0] = [2.0, 2.0, 2.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [2.0, 2.0, 2.0] * [7.0, 7.0, 7.0] = [14.0, 14.0, 14.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [14.0, 14.0, 14.0] * [9.0, 9.0, 9.0] = [126.0, 126.0, 126.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_mul = ops.ScatterMul()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_mul(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[  3.   3.   3.]</span>
<span class="sd">         [126. 126. 126.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [0, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [1.0, 1.0, 1.0] * [7.0, 7.0, 7.0] = [7.0, 7.0, 7.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [6.0, 6.0, 6.0] * [9.0, 9.0, 9.0] = [54.0, 54.0, 54.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_mul = ops.ScatterMul()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_mul(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 7.  7.  7.]</span>
<span class="sd">         [54. 54. 54.]]</span>
<span class="sd">    &quot;&quot;&quot;</span></div>


<div class="viewcode-block" id="ScatterDiv"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterDiv.html#mindspore.ops.ScatterDiv">[docs]</a><span class="k">class</span> <span class="nc">ScatterDiv</span><span class="p">(</span><span class="n">_ScatterOp</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates the value of the input tensor through the divide operation.</span>

<span class="sd">    Using given values to update tensor value through the div operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    for each `i, ..., j` in `indices.shape`:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{/}= \text{updates}[i, ..., j, :]</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do div operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor doing the div operation with `input_x`,</span>
<span class="sd">          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([0, 1]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_div = ops.ScatterDiv()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_div(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[3. 3. 3.]</span>
<span class="sd">         [1. 1. 1.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],</span>
<span class="sd">        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [21.0, 21.0, 21.0] / [7.0, 7.0, 7.0] = [3.0, 3.0, 3.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_div = ops.ScatterDiv()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_div(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[105. 105. 105.]</span>
<span class="sd">         [  3.   3.   3.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],</span>
<span class="sd">        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[1, 0], [1, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [1, 0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [105.0, 105.0, 105.0] / [3.0, 3.0, 3.0] = [35.0, 35.0, 35.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [315.0, 315.0, 315.0] / [1.0, 1.0, 1.0] = [315.0, 315.0, 315.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [1, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [315.0, 315.0, 315.0] / [5.0, 5.0, 5.0] = [63.0 63.0 63.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [63.0 63.0 63.0] / [7.0, 7.0, 7.0] = [9.0, 9.0, 9.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_div = ops.ScatterDiv()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_div(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[35. 35. 35.]</span>
<span class="sd">         [ 9.  9.  9.]]</span>
<span class="sd">        &gt;&gt;&gt; # for input_x will be updated after the operation is completed. input_x need to be re-initialized.</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],</span>
<span class="sd">        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; # for indices = [[0, 1], [0, 1]]</span>
<span class="sd">        &gt;&gt;&gt; # step 1: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]</span>
<span class="sd">        &gt;&gt;&gt; # step 2: [0, 1]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[0] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]</span>
<span class="sd">        &gt;&gt;&gt; # input_x[1] = [105.0, 105.0, 105.0] / [7.0, 7.0, 7.0] = [15.0, 15.0, 15.0]</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],</span>
<span class="sd">        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_div = ops.ScatterDiv()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_div(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[21. 21. 21.]</span>
<span class="sd">         [15. 15. 15.]]</span>
<span class="sd">    &quot;&quot;&quot;</span></div>


<div class="viewcode-block" id="ScatterNdAdd"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterNdAdd.html#mindspore.ops.ScatterNdAdd">[docs]</a><span class="k">class</span> <span class="nc">ScatterNdAdd</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Applies sparse addition to individual values or slices in a tensor.</span>

<span class="sd">    Using given values to update tensor value through the add operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    `input_x` has rank P and `indices` has rank Q where `Q &gt;= 2`.</span>

<span class="sd">    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N &lt;= P`.</span>

<span class="sd">    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.</span>

<span class="sd">    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:</span>
<span class="sd">    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.</span>
<span class="sd">          The rank of indices must be at least 2 and `indices_shape[-1] &lt;= len(shape)`.</span>
<span class="sd">        - **updates** (Tensor) - The tensor doing the add operation with `input_x`,</span>
<span class="sd">          the data type is same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the updated `input_x`, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_nd_add = ops.ScatterNdAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_nd_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [ 1. 10.  9.  4. 12.  6.  7. 17.]</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0], [2]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],</span>
<span class="sd">        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_nd_add = ops.ScatterNdAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_nd_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[1 1 1 1]</span>
<span class="sd">          [2 2 2 2]</span>
<span class="sd">          [3 3 3 3]</span>
<span class="sd">          [4 4 4 4]]</span>
<span class="sd">         [[0 0 0 0]</span>
<span class="sd">          [0 0 0 0]</span>
<span class="sd">          [0 0 0 0]</span>
<span class="sd">          [0 0 0 0]]</span>
<span class="sd">         [[5 5 5 5]</span>
<span class="sd">          [6 6 6 6]</span>
<span class="sd">          [7 7 7 7]</span>
<span class="sd">          [8 8 8 8]]</span>
<span class="sd">         [[0 0 0 0]</span>
<span class="sd">          [0 0 0 0]</span>
<span class="sd">          [0 0 0 0]</span>
<span class="sd">          [0 0 0 0]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="n">__mindspore_signature__</span> <span class="o">=</span> <span class="p">(</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">sig</span><span class="o">.</span><span class="n">sig_rw</span><span class="o">.</span><span class="n">RW_WRITE</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T1</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;updates&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
    <span class="p">)</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">use_locking</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterNdAdd&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;use_locking&#39;</span><span class="p">,</span> <span class="n">use_locking</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span></div>


<div class="viewcode-block" id="ScatterNdSub"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterNdSub.html#mindspore.ops.ScatterNdSub">[docs]</a><span class="k">class</span> <span class="nc">ScatterNdSub</span><span class="p">(</span><span class="n">_ScatterNdOp</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Applies sparse subtraction to individual values or slices in a tensor.</span>

<span class="sd">    Using given values to update tensor value through the subtraction operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    `input_x` has rank P and `indices` has rank Q where `Q &gt;= 2`.</span>

<span class="sd">    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N &lt;= P`.</span>

<span class="sd">    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.</span>

<span class="sd">    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:</span>
<span class="sd">    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to the</span>
<span class="sd">    relatively highest priority data type.</span>

<span class="sd">    Args:</span>
<span class="sd">        use_locking (bool): Whether to protect the assignment by a lock. Default: False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target tensor, with data type of Parameter.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor, with int32 data type.</span>
<span class="sd">          The rank of indices must be at least 2 and `indices_shape[-1] &lt;= len(shape)`.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to be updated to the input tensor, has the same type as input.</span>
<span class="sd">          The shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `use_locking` is not a bool.</span>
<span class="sd">        TypeError: If `indices` is not an int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_nd_sub = ops.ScatterNdSub()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_nd_sub(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [ 1. -6. -3.  4. -2.  6.  7. -1.]</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0], [2]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],</span>
<span class="sd">        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_nd_sub = ops.ScatterNdSub()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_nd_sub(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[-1 -1 -1 -1]</span>
<span class="sd">          [-2 -2 -2 -2]</span>
<span class="sd">          [-3 -3 -3 -3]</span>
<span class="sd">          [-4 -4 -4 -4]]</span>
<span class="sd">         [[ 0  0  0  0]</span>
<span class="sd">          [ 0  0  0  0]</span>
<span class="sd">          [ 0  0  0  0]</span>
<span class="sd">          [ 0  0  0  0]]</span>
<span class="sd">         [[-5 -5 -5 -5]</span>
<span class="sd">          [-6 -6 -6 -6]</span>
<span class="sd">          [-7 -7 -7 -7]</span>
<span class="sd">          [-8 -8 -8 -8]]</span>
<span class="sd">         [[ 0  0  0  0]</span>
<span class="sd">          [ 0  0  0  0]</span>
<span class="sd">          [ 0  0  0  0]</span>
<span class="sd">          [ 0  0  0  0]]]</span>
<span class="sd">    &quot;&quot;&quot;</span></div>


<div class="viewcode-block" id="ScatterNonAliasingAdd"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ScatterNonAliasingAdd.html#mindspore.ops.ScatterNonAliasingAdd">[docs]</a><span class="k">class</span> <span class="nc">ScatterNonAliasingAdd</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Applies sparse addition to the input using individual values or slices.</span>

<span class="sd">    Using given values to update tensor value through the add operation, along with the input indices.</span>
<span class="sd">    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.</span>

<span class="sd">    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.</span>
<span class="sd">    If they have different data types, the lower priority data type will be converted to</span>
<span class="sd">    the relatively highest priority data type.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Parameter) - The target parameter. The data type must be float16, float32 or int32.</span>
<span class="sd">        - **indices** (Tensor) - The index to perform the addition operation whose data type must be mindspore.int32.</span>
<span class="sd">        - **updates** (Tensor) - The tensor that performs the addition operation with `input_x`,</span>
<span class="sd">          the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Parameter, the updated `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is not int32.</span>
<span class="sd">        TypeError: If dtype of `input_x` is not one of float16, float32, int32.</span>
<span class="sd">        ValueError: If the shape of `updates` is not equal to `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.</span>
<span class="sd">        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter</span>
<span class="sd">                      is required when data type conversion of Parameter is not supported.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name=&quot;x&quot;)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; scatter_non_aliasing_add = ops.ScatterNonAliasingAdd()</span>
<span class="sd">        &gt;&gt;&gt; output = scatter_non_aliasing_add(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [ 1. 10.  9.  4. 12.  6.  7. 17.]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="n">__mindspore_signature__</span> <span class="o">=</span> <span class="p">(</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">sig</span><span class="o">.</span><span class="n">sig_rw</span><span class="o">.</span><span class="n">RW_WRITE</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T1</span><span class="p">),</span>
        <span class="n">sig</span><span class="o">.</span><span class="n">make_sig</span><span class="p">(</span><span class="s1">&#39;updates&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">sig</span><span class="o">.</span><span class="n">sig_dtype</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
    <span class="p">)</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterNonAliasingAdd&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_mem&#39;</span><span class="p">,</span> <span class="kc">True</span><span class="p">)</span></div>


<div class="viewcode-block" id="SpaceToDepth"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.SpaceToDepth.html#mindspore.ops.SpaceToDepth">[docs]</a><span class="k">class</span> <span class="nc">SpaceToDepth</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Rearrange blocks of spatial data into depth.</span>

<span class="sd">    The output tensor&#39;s `height` dimension is :math:`height / block\_size`.</span>

<span class="sd">    The output tensor&#39;s `weight` dimension is :math:`weight / block\_size`.</span>

<span class="sd">    The depth of output tensor is :math:`block\_size * block\_size * input\_depth`.</span>

<span class="sd">    The input tensor&#39;s height and width must be divisible by `block_size`.</span>
<span class="sd">    The data format is &quot;NCHW&quot;.</span>

<span class="sd">    Args:</span>
<span class="sd">        block_size (int): The block size used to divide spatial data. It must be &gt;= 2.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The target tensor. The data type is Number. It must be a 4-D tensor.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the same data type as `x`. It must be a 4-D tensor. Tensor of shape</span>
<span class="sd">        :math:`(N, (C_{in} * \text{block_size} * \text{block_size}), H_{in} / \text{block_size}, W_{in} / \text{block_size})`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `block_size` is not an int.</span>
<span class="sd">        ValueError: If `block_size` is less than 2.</span>
<span class="sd">        ValueError: If length of shape of `x` is not equal to 4.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; block_size = 2</span>
<span class="sd">        &gt;&gt;&gt; space_to_depth = ops.SpaceToDepth(block_size)</span>
<span class="sd">        &gt;&gt;&gt; output = space_to_depth(x)</span>
<span class="sd">        &gt;&gt;&gt; print(output.shape)</span>
<span class="sd">        (1, 12, 1, 1)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">block_size</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SpaceToDepth&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">=</span> <span class="n">block_size</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;data_format&quot;</span><span class="p">,</span> <span class="s2">&quot;NCHW&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x dimension&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">2</span><span class="p">):</span>
            <span class="k">if</span> <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span> <span class="o">%</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
                <span class="n">msg_prefix</span> <span class="o">=</span> <span class="s2">&quot;2nd&quot;</span> <span class="k">if</span> <span class="n">i</span> <span class="o">+</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">2</span> <span class="k">else</span> <span class="s2">&quot;3rd&quot;</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of output with index </span><span class="si">{</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="si">}</span><span class="s2"> must be divided &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;exactly by &#39;block_size&#39;, but got the </span><span class="si">{</span><span class="n">msg_prefix</span><span class="si">}</span><span class="s2"> dimension &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;of output: </span><span class="si">{</span><span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span><span class="si">}</span><span class="s2"> and &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;&#39;block_size&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">block_size</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
            <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span> <span class="o">//=</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span>

        <span class="n">out_shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span>
        <span class="k">return</span> <span class="n">out_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x_dtype&quot;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span></div>


<div class="viewcode-block" id="DepthToSpace"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.DepthToSpace.html#mindspore.ops.DepthToSpace">[docs]</a><span class="k">class</span> <span class="nc">DepthToSpace</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Rearrange blocks of depth data into spatial dimensions.</span>

<span class="sd">    This is the reverse operation of SpaceToDepth.</span>

<span class="sd">    The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.</span>

<span class="sd">    The output tensor&#39;s `height` dimension is :math:`height * block\_size`.</span>

<span class="sd">    The output tensor&#39;s `weight` dimension is :math:`weight * block\_size`.</span>

<span class="sd">    The input tensor&#39;s depth must be divisible by `block_size * block_size`.</span>
<span class="sd">    The data format is &quot;NCHW&quot;.</span>

<span class="sd">    Args:</span>
<span class="sd">        block_size (int): The block size used to divide depth data. It must be &gt;= 2.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The target tensor. It must be a 4-D tensor with shape :math:`(N, C_{in}, H_{in}, W_{in})`.</span>
<span class="sd">          The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor of shape :math:`(N, C_{in} / \text{block_size} ^ 2, H_{in} * \text{block_size},</span>
<span class="sd">        W_{in} * \text{block_size})`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `block_size` is not an int.</span>
<span class="sd">        ValueError: If `block_size` is less than 2.</span>
<span class="sd">        ValueError: If length of shape of `x` is not equal to 4.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.random.rand(1, 12, 1, 1), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; block_size = 2</span>
<span class="sd">        &gt;&gt;&gt; depth_to_space = ops.DepthToSpace(block_size)</span>
<span class="sd">        &gt;&gt;&gt; output = depth_to_space(x)</span>
<span class="sd">        &gt;&gt;&gt; print(output.shape)</span>
<span class="sd">        (1, 3, 2, 2)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">block_size</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize DepthToSpace&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">=</span> <span class="n">block_size</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;data_format&quot;</span><span class="p">,</span> <span class="s2">&quot;NCHW&quot;</span><span class="p">)</span></div>


<div class="viewcode-block" id="SpaceToBatch"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.SpaceToBatch.html#mindspore.ops.SpaceToBatch">[docs]</a><span class="k">class</span> <span class="nc">SpaceToBatch</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    SpaceToBatch is deprecated. Please use :class:`mindspore.ops.SpaceToBatchND` instead.</span>
<span class="sd">    Divides spatial dimensions into blocks and combines the block size with the original batch.</span>

<span class="sd">    This operation will divide spatial dimensions (H, W) into blocks with `block_size`, the output tensor&#39;s H and W</span>
<span class="sd">    dimension is the corresponding number of blocks after division. The output tensor&#39;s batch dimension is the</span>
<span class="sd">    product of the original batch and the square of block_size. Before division, the spatial dimensions</span>
<span class="sd">    of the input are zero padded according to paddings if necessary.</span>

<span class="sd">    Args:</span>
<span class="sd">        block_size (int): The block size of dividing blocks with value greater than or equal to 2.</span>
<span class="sd">        paddings (Union[tuple, list]): The padding values for H and W dimension, containing 2 subtraction lists.</span>
<span class="sd">            Each subtraction list contains 2 integer value. All values must be greater than 0.</span>
<span class="sd">            paddings[i] specifies the paddings for the spatial dimension i, which corresponds to the</span>
<span class="sd">            input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1]</span>
<span class="sd">            is divisible by block_size.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor. The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with</span>
<span class="sd">        :math:`block\_size` and :math:`paddings`. The shape of the output tensor will be :math:`(n&#39;, c&#39;, h&#39;, w&#39;)`,</span>
<span class="sd">        where</span>

<span class="sd">        :math:`n&#39; = n*(block\_size*block\_size)`</span>

<span class="sd">        :math:`c&#39; = c`</span>

<span class="sd">        :math:`h&#39; = (h+paddings[0][0]+paddings[0][1])//block\_size`</span>

<span class="sd">        :math:`w&#39; = (w+paddings[1][0]+paddings[1][1])//block\_size`</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `block_size` is not an int.</span>
<span class="sd">        ValueError: If `block_size` is less than 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        Deprecated</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; block_size = 2</span>
<span class="sd">        &gt;&gt;&gt; paddings = [[0, 0], [0, 0]]</span>
<span class="sd">        &gt;&gt;&gt; space_to_batch = ops.SpaceToBatch(block_size, paddings)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = space_to_batch(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[[1.]]]</span>
<span class="sd">         [[[2.]]]</span>
<span class="sd">         [[[3.]]]</span>
<span class="sd">         [[[4.]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="n">paddings</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SpaceToBatch&quot;&quot;&quot;</span>
        <span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span><span class="s2">&quot;WARN_DEPRECATED: The usage of SpaceToBatch is deprecated.&quot;</span>
                       <span class="s2">&quot; Please use SpaceToBatchND.&quot;</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">=</span> <span class="n">block_size</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;paddings shape&#39;</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">paddings</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">paddings</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">elem</span><span class="p">,</span> <span class="s1">&#39;paddings element&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;paddings element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">paddings</span> <span class="o">=</span> <span class="n">paddings</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="mi">4</span><span class="p">,</span> <span class="s1">&#39;rank of input_x&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">2</span><span class="p">):</span>
            <span class="n">padded</span> <span class="o">=</span> <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">paddings</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">paddings</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
            <span class="k">if</span> <span class="n">padded</span> <span class="o">%</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
                <span class="n">msg_ndim</span> <span class="o">=</span> <span class="s2">&quot;2nd&quot;</span> <span class="k">if</span> <span class="n">i</span> <span class="o">+</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">2</span> <span class="k">else</span> <span class="s2">&quot;3rd&quot;</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of the output tensor should be &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;divisible by &#39;block_size&#39;, but got the </span><span class="si">{</span><span class="n">msg_ndim</span><span class="si">}</span><span class="s2"> dimension of output: </span><span class="si">{</span><span class="n">padded</span><span class="si">}</span><span class="s2"> and &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;&#39;block_size&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">block_size</span><span class="si">}</span><span class="s2">. Please check the official homepage &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;for more information about the output tensor.&quot;</span><span class="p">)</span>
            <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span> <span class="o">=</span> <span class="n">padded</span> <span class="o">//</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span>
        <span class="n">out_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span>
        <span class="k">return</span> <span class="n">out_shape</span></div>


<div class="viewcode-block" id="BatchToSpace"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.BatchToSpace.html#mindspore.ops.BatchToSpace">[docs]</a><span class="k">class</span> <span class="nc">BatchToSpace</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.</span>

<span class="sd">    This operation will divide batch dimension N into blocks with block_size, the output tensor&#39;s N dimension</span>
<span class="sd">    is the corresponding number of blocks after division. The output tensor&#39;s H, W dimension is product of</span>
<span class="sd">    original H, W dimension and block_size with given amount to crop from dimension, respectively.</span>

<span class="sd">    Args:</span>
<span class="sd">        block_size (int): The block size of division, has the value not less than 2.</span>
<span class="sd">        crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 subtraction lists.</span>
<span class="sd">            Each list contains 2 integers.</span>
<span class="sd">            All values must be not less than 0. crops[i] specifies the crop values for the spatial dimension i, which</span>
<span class="sd">            corresponds to the input dimension i+2. It is required that</span>

<span class="sd">            :math:`input\_shape[i+2]*block\_size &gt;= crops[i][0]+crops[i][1]`</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible by</span>
<span class="sd">          product of `block_shape`. The data type is float16 or float32.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with block_size</span>
<span class="sd">        and crops. The output shape will be (n&#39;, c&#39;, h&#39;, w&#39;), where</span>

<span class="sd">        :math:`n&#39; = n//(block\_size*block\_size)`</span>

<span class="sd">        :math:`c&#39; = c`</span>

<span class="sd">        :math:`h&#39; = h*block\_size-crops[0][0]-crops[0][1]`</span>

<span class="sd">        :math:`w&#39; = w*block\_size-crops[1][0]-crops[1][1]`</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `block_size` or element of `crops` is not an int.</span>
<span class="sd">        TypeError: If `crops` is neither list nor tuple.</span>
<span class="sd">        ValueError: If `block_size` is less than 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; block_size = 2</span>
<span class="sd">        &gt;&gt;&gt; crops = [[0, 0], [0, 0]]</span>
<span class="sd">        &gt;&gt;&gt; batch_to_space = ops.BatchToSpace(block_size, crops)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = batch_to_space(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[[1.  2.]</span>
<span class="sd">           [3.  4.]]]]</span>

<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="n">crops</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize BatchToSpace&quot;&quot;&quot;</span>
        <span class="n">logger</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span><span class="s2">&quot;WARN_DEPRECATED: The usage of BatchToSpace is deprecated.&quot;</span>
                       <span class="s2">&quot; Please use BatchToSpaceND.&quot;</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_size&#39;</span><span class="p">,</span> <span class="n">block_size</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">=</span> <span class="n">block_size</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;crops type&#39;</span><span class="p">,</span> <span class="n">crops</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;crops shape&#39;</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">crops</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">crops</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">elem</span><span class="p">,</span> <span class="s1">&#39;crops element&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;crops element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">crops</span> <span class="o">=</span> <span class="n">crops</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;rank of input_x&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">4</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">2</span><span class="p">):</span>
            <span class="n">x_block_prod</span> <span class="o">=</span> <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span>
            <span class="n">crops_sum</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">crops</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">crops</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;x block shape prod&quot;</span><span class="p">,</span> <span class="n">x_block_prod</span><span class="p">,</span> <span class="s1">&#39;crops sum&#39;</span><span class="p">,</span> <span class="n">crops_sum</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GT</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="p">]</span> <span class="o">=</span> <span class="n">x_block_prod</span> <span class="o">-</span> <span class="n">crops_sum</span>
        <span class="n">block_size_prod</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_size</span>
        <span class="k">if</span> <span class="n">out_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">%</span> <span class="n">block_size_prod</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of output with index 0 must be divided exactly &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;by block_size_prod, but got the shape of output: </span><span class="si">{</span><span class="n">out_shape</span><span class="si">}</span><span class="s2"> and &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;block_size_prod: </span><span class="si">{</span><span class="n">block_size_prod</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">out_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">=</span> <span class="n">out_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">//</span> <span class="n">block_size_prod</span>
        <span class="k">return</span> <span class="n">out_shape</span></div>


<div class="viewcode-block" id="SpaceToBatchND"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.SpaceToBatchND.html#mindspore.ops.SpaceToBatchND">[docs]</a><span class="k">class</span> <span class="nc">SpaceToBatchND</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Divides spatial dimensions into blocks and combines the block size with the original batch.</span>

<span class="sd">    This operation will divide spatial dimensions (H, W) into blocks with block_shape, the output tensor&#39;s H and W</span>
<span class="sd">    dimension is the corresponding number of blocks after division. The output tensor&#39;s batch dimension is the</span>
<span class="sd">    product of the original batch and the product of `block_shape`. Before division,</span>
<span class="sd">    the spatial dimensions of the input are zero padded according to paddings if necessary.</span>

<span class="sd">    Args:</span>
<span class="sd">        block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value not less</span>
<span class="sd">            than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M corresponding to the</span>
<span class="sd">            number of spatial dimensions. If `block_shape` is an int, the block size of M dimensions are the same,</span>
<span class="sd">            equal to `block_shape`. M must be 2.</span>
<span class="sd">        paddings (Union[tuple, list]): The padding values for H and W dimension, containing 2 sublists.</span>
<span class="sd">            Each contains 2 integer values. All values must be greater than or equal to 0.</span>
<span class="sd">            `paddings[i]` specifies the paddings for the spatial dimension i,</span>
<span class="sd">            which corresponds to the input dimension i+2.</span>
<span class="sd">            It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible by block_shape[i].</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with</span>
<span class="sd">        :math:`block\_shape` and :math:`paddings`. The shape of the output tensor will be :math:`(n&#39;, c&#39;, h&#39;, w&#39;)`,</span>
<span class="sd">        where</span>

<span class="sd">        :math:`n&#39; = n*(block\_shape[0]*block\_shape[1])`</span>

<span class="sd">        :math:`c&#39; = c`</span>

<span class="sd">        :math:`h&#39; = (h+paddings[0][0]+paddings[0][1])//block\_shape[0]`</span>

<span class="sd">        :math:`w&#39; = (w+paddings[1][0]+paddings[1][1])//block\_shape[1]`</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `block_shape` is not one of list, tuple, int.</span>
<span class="sd">        TypeError: If `paddings` is neither list nor tuple.</span>
<span class="sd">        ValueError: If length of shape of `block_shape` is not equal to 1.</span>
<span class="sd">        ValueError: If length of `block_shape` or `paddings` is not equal to 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; block_shape = [2, 2]</span>
<span class="sd">        &gt;&gt;&gt; paddings = [[0, 0], [0, 0]]</span>
<span class="sd">        &gt;&gt;&gt; space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = space_to_batch_nd(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[[1.]]]</span>
<span class="sd">         [[[2.]]]</span>
<span class="sd">         [[[3.]]]</span>
<span class="sd">         [[[4.]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">block_shape</span><span class="p">,</span> <span class="n">paddings</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SpaceToBatchND&quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">block_shape</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">block_shape</span> <span class="o">=</span> <span class="p">(</span><span class="n">block_shape</span><span class="p">,)</span> <span class="o">*</span> <span class="mi">2</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;block_shape&quot;</span><span class="p">,</span> <span class="n">block_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_shape type&#39;</span><span class="p">,</span> <span class="n">block_shape</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_shape shape&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">block_shape</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">block_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">block_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_shape length&#39;</span><span class="p">,</span> <span class="n">block_rank</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">block_shape</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_shape element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_shape element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">block_shape</span> <span class="o">=</span> <span class="n">block_shape</span>

        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;paddings type&#39;</span><span class="p">,</span> <span class="n">paddings</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;paddings length&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">paddings</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;paddings shape&#39;</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">paddings</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="p">(</span><span class="n">block_rank</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">paddings</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">elem</span><span class="p">,</span> <span class="s1">&#39;paddings element&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;paddings element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">paddings</span> <span class="o">=</span> <span class="n">paddings</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_equal_int</span><span class="p">(</span><span class="n">x_rank</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="s1">&#39;x_shape rank&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>

        <span class="n">block_shape_prod</span> <span class="o">=</span> <span class="mi">1</span>
        <span class="n">offset</span> <span class="o">=</span> <span class="mi">2</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">block_shape</span><span class="p">)):</span>
            <span class="n">padded</span> <span class="o">=</span> <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="n">offset</span><span class="p">]</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">paddings</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span> <span class="o">+</span> \
                     <span class="bp">self</span><span class="o">.</span><span class="n">paddings</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span>
            <span class="k">if</span> <span class="n">padded</span> <span class="o">%</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the padded should be divisible by &#39;block_shape&#39;, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;where padded = input_x_shape[i + 2] + paddings[i][0] + paddings[i][1], &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but got input_x_shape[</span><span class="si">{</span><span class="n">i</span> <span class="o">+</span> <span class="mi">2</span><span class="si">}</span><span class="s2">]: </span><span class="si">{</span><span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="n">offset</span><span class="p">]</span><span class="si">}</span><span class="s2">, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;paddings[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">][0]: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">paddings</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">0</span><span class="p">]</span><span class="si">}</span><span class="s2"> and paddings[</span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">][1]: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">paddings</span><span class="p">[</span><span class="n">i</span><span class="p">][</span><span class="mi">1</span><span class="p">]</span><span class="si">}</span><span class="s2">.&quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot; Please check the official api documents for &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;more information about the output tensor.&quot;</span><span class="p">)</span>
            <span class="n">out_shape</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="n">offset</span><span class="p">]</span> <span class="o">=</span> <span class="n">padded</span> <span class="o">//</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
            <span class="n">block_shape_prod</span> <span class="o">=</span> <span class="n">block_shape_prod</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">block_shape</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
        <span class="n">out_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">*=</span> <span class="n">block_shape_prod</span>
        <span class="k">return</span> <span class="n">out_shape</span></div>


<div class="viewcode-block" id="BatchToSpaceND"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.BatchToSpaceND.html#mindspore.ops.BatchToSpaceND">[docs]</a><span class="k">class</span> <span class="nc">BatchToSpaceND</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.</span>

<span class="sd">    This operation will divide batch dimension N into blocks with block_shape, the output tensor&#39;s N dimension</span>
<span class="sd">    is the corresponding number of blocks after division. The output tensor&#39;s H, W dimension is the product of</span>
<span class="sd">    original H, W dimension and block_shape with given amount to crop from dimension, respectively.</span>

<span class="sd">    Args:</span>
<span class="sd">        block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value not less</span>
<span class="sd">            than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M corresponding to the</span>
<span class="sd">            number of spatial dimensions. If `block_shape` is an int, the block size of M dimensions are the same,</span>
<span class="sd">            equal to `block_shape`. M must be 2.</span>
<span class="sd">        crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 sublists,</span>
<span class="sd">            each containing 2 integer values.</span>
<span class="sd">            All values must be &gt;= 0. crops[i] specifies the crop values for spatial dimension i, which corresponds to</span>
<span class="sd">            input dimension i+2. It is required that</span>

<span class="sd">            :math:`input\_shape[i+2]*block\_shape[i] &gt; crops[i][0]+crops[i][1]`</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible by</span>
<span class="sd">          product of `block_shape`. The data type is float16 or float32.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with block_shape</span>
<span class="sd">        and crops. The output shape will be (n&#39;, c&#39;, h&#39;, w&#39;), where</span>

<span class="sd">        :math:`n&#39; = n//(block\_shape[0]*block\_shape[1])`</span>

<span class="sd">        :math:`c&#39; = c`</span>

<span class="sd">        :math:`h&#39; = h*block\_shape[0]-crops[0][0]-crops[0][1]`</span>

<span class="sd">        :math:`w&#39; = w*block\_shape[1]-crops[1][0]-crops[1][1]`</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `block_shape` is not one of list, tuple, int.</span>
<span class="sd">        TypeError: If `crops` is neither list nor tuple.</span>
<span class="sd">        ValueError: If length of shape of `block_shape` is not equal to 1.</span>
<span class="sd">        ValueError: If length of `block_shape` or `crops` is not equal to 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; block_shape = [2, 2]</span>
<span class="sd">        &gt;&gt;&gt; crops = [[0, 0], [0, 0]]</span>
<span class="sd">        &gt;&gt;&gt; batch_to_space_nd = ops.BatchToSpaceND(block_shape, crops)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = batch_to_space_nd(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[[1.  2.]</span>
<span class="sd">           [3.  4.]]]]</span>

<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">block_shape</span><span class="p">,</span> <span class="n">crops</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize BatchToSpaceND&quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">block_shape</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">block_shape</span> <span class="o">=</span> <span class="p">(</span><span class="n">block_shape</span><span class="p">,)</span> <span class="o">*</span> <span class="mi">2</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;block_shape&quot;</span><span class="p">,</span> <span class="n">block_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_shape type&#39;</span><span class="p">,</span> <span class="n">block_shape</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_shape shape&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">block_shape</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">block_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">block_shape</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_shape length&#39;</span><span class="p">,</span> <span class="n">block_rank</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">block_shape</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;block_shape element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;block_shape element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">block_shape</span> <span class="o">=</span> <span class="n">block_shape</span>

        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;crops type&#39;</span><span class="p">,</span> <span class="n">crops</span><span class="p">,</span> <span class="p">[</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;crops length&#39;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">crops</span><span class="p">),</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;crops shape&#39;</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">crops</span><span class="p">)</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">,</span> <span class="p">(</span><span class="n">block_rank</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">elem</span> <span class="ow">in</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">crops</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">elem</span><span class="p">,</span> <span class="s1">&#39;crops element&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;crops element&#39;</span><span class="p">,</span> <span class="n">elem</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">crops</span> <span class="o">=</span> <span class="n">crops</span></div>


<span class="k">class</span> <span class="nc">BroadcastTo</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Broadcasts input tensor to a given shape.</span>

<span class="sd">    Input shape can be broadcast to target shape if for each dimension pair they are either equal or input is one or</span>
<span class="sd">    the target dimension is -1. In case of -1 in target shape, it will be replaced by the input shape&#39;s value</span>
<span class="sd">    in that dimension.</span>

<span class="sd">    When input shape is broadcast to target shape, it starts with the trailing</span>
<span class="sd">    dimensions. If there is a -1 in the target shape, the -1 cannot be in a leading,</span>
<span class="sd">    non-existing dimension.</span>

<span class="sd">    Args:</span>
<span class="sd">        shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position</span>
<span class="sd">            where it will be substituted by the input tensor&#39;s shape in that position, see example.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The input tensor. The data type should be one of the following types:</span>
<span class="sd">          float16, float32, int32, int8, uint8, bool.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, with the given `shape` and the same data type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `shape` is not a tuple.</span>
<span class="sd">        ValueError: If the target and input shapes are incompatible, or if a -1 in the target shape is in an invalid</span>
<span class="sd">                    location.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; shape = (2, 3)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; broadcast_to = ops.BroadcastTo(shape)</span>
<span class="sd">        &gt;&gt;&gt; output = broadcast_to(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 2. 3.]</span>
<span class="sd">         [1. 2. 3.]]</span>

<span class="sd">        &gt;&gt;&gt; shape = (-1, 2)</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1], [2]]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; broadcast_to = ops.BroadcastTo(shape)</span>
<span class="sd">        &gt;&gt;&gt; output = broadcast_to(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1.]</span>
<span class="sd">         [2. 2.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">shape</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize BroadcastTo&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">shape</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;dimension of input_x&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">shape</span><span class="p">),</span> <span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GT</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">ix</span><span class="p">,</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">shape</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s1">&#39;target shape index -&gt; &#39;</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">ix</span><span class="p">),</span> <span class="n">i</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;shape element&quot;</span><span class="p">,</span> <span class="n">i</span><span class="p">,</span> <span class="s2">&quot;shape element min limit&quot;</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">shape</span> <span class="o">=</span> <span class="n">shape</span>


<span class="k">class</span> <span class="nc">Meshgrid</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Generates coordinate matrices from given coordinate tensors.</span>

<span class="sd">    Given N one-dimensional coordinate tensors, returns a tuple outputs of N N-D</span>
<span class="sd">    coordinate tensors for evaluating expressions on an N-D grid.</span>

<span class="sd">    Args:</span>
<span class="sd">        indexing (&#39;xy&#39;, &#39;ij&#39;, optional): Cartesian (&#39;xy&#39;, default) or</span>
<span class="sd">            matrix (&#39;ij&#39;) indexing of output. In the 2-D case with</span>
<span class="sd">            inputs of length `M` and `N`, the outputs are of shape `(N, M)`</span>
<span class="sd">            for &#39;xy&#39; indexing and `(M, N)` for &#39;ij&#39; indexing. In the 3-D</span>
<span class="sd">            case with inputs of length `M`, `N` and `P`, outputs are of shape</span>
<span class="sd">            `(N, M, P)` for &#39;xy&#39; indexing and `(M, N, P)` for &#39;ij&#39; indexing.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.</span>
<span class="sd">          The length of input should be greater than 1. The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensors, A Tuple of N N-D Tensor objects. The data type is the same with the Inputs.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `indexing` is not a str or `input` is not a tuple.</span>
<span class="sd">        ValueError: If `indexing` is neither &#39;xy&#39; nor &#39;ij&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; y = Tensor(np.array([5, 6, 7]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))</span>
<span class="sd">        &gt;&gt;&gt; inputs = (x, y, z)</span>
<span class="sd">        &gt;&gt;&gt; meshgrid = ops.Meshgrid(indexing=&quot;xy&quot;)</span>
<span class="sd">        &gt;&gt;&gt; output = meshgrid(inputs)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[3, 4, 5], dtype=Int32, value=</span>
<span class="sd">         [[[1, 1, 1, 1, 1],</span>
<span class="sd">           [2, 2, 2, 2, 2],</span>
<span class="sd">           [3, 3, 3, 3, 3],</span>
<span class="sd">           [4, 4, 4, 4, 4]],</span>
<span class="sd">          [[1, 1, 1, 1, 1],</span>
<span class="sd">           [2, 2, 2, 2, 2],</span>
<span class="sd">           [3, 3, 3, 3, 3],</span>
<span class="sd">           [4, 4, 4, 4, 4]],</span>
<span class="sd">          [[1, 1, 1, 1, 1],</span>
<span class="sd">           [2, 2, 2, 2, 2],</span>
<span class="sd">           [3, 3, 3, 3, 3],</span>
<span class="sd">           [4, 4, 4, 4, 4]]]),</span>
<span class="sd">         Tensor(shape=[3, 4, 5], dtype=Int32, value=</span>
<span class="sd">         [[[5, 5, 5, 5, 5],</span>
<span class="sd">           [5, 5, 5, 5, 5],</span>
<span class="sd">           [5, 5, 5, 5, 5],</span>
<span class="sd">           [5, 5, 5, 5, 5]],</span>
<span class="sd">          [[6, 6, 6, 6, 6],</span>
<span class="sd">           [6, 6, 6, 6, 6],</span>
<span class="sd">           [6, 6, 6, 6, 6],</span>
<span class="sd">           [6, 6, 6, 6, 6]],</span>
<span class="sd">          [[7, 7, 7, 7, 7],</span>
<span class="sd">           [7, 7, 7, 7, 7],</span>
<span class="sd">           [7, 7, 7, 7, 7],</span>
<span class="sd">           [7, 7, 7, 7, 7]]]),</span>
<span class="sd">         Tensor(shape=[3, 4, 5], dtype=Int32, value=</span>
<span class="sd">         [[[8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2]],</span>
<span class="sd">          [[8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2]],</span>
<span class="sd">          [[8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2],</span>
<span class="sd">           [8, 9, 0, 1, 2]]]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indexing</span><span class="o">=</span><span class="s2">&quot;xy&quot;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Meshgrid.&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;indexing&quot;</span><span class="p">,</span> <span class="n">indexing</span><span class="p">,</span> <span class="p">(</span><span class="nb">str</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_string</span><span class="p">(</span><span class="n">indexing</span><span class="o">.</span><span class="n">lower</span><span class="p">(),</span> <span class="p">[</span><span class="s2">&quot;xy&quot;</span><span class="p">,</span> <span class="s2">&quot;ij&quot;</span><span class="p">],</span> <span class="s2">&quot;indexing&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">indexing</span> <span class="o">=</span> <span class="n">indexing</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;shape&quot;</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">GE</span><span class="p">,</span> <span class="s2">&quot;len of input&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">n</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="n">shape_0</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="k">for</span> <span class="n">s</span> <span class="ow">in</span> <span class="n">x_shape</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">s</span><span class="p">),</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="s1">&#39;each input rank&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="n">shape_0</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">s</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">indexing</span> <span class="o">==</span> <span class="s2">&quot;xy&quot;</span><span class="p">:</span>
            <span class="n">shape_0</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">shape_0</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">shape_0</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">shape_0</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="n">shape_0</span><span class="p">)</span> <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">out_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_type</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;input[0]&quot;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">n</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_type</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">n</span><span class="p">):</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;x_type[</span><span class="si">%d</span><span class="s1">]&#39;</span> <span class="o">%</span> <span class="n">i</span><span class="p">,</span> <span class="n">x_type</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="s1">&#39;base&#39;</span><span class="p">,</span> <span class="n">x_type</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="ne">TypeError</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_type</span>


<div class="viewcode-block" id="InplaceUpdate"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.InplaceUpdate.html#mindspore.ops.InplaceUpdate">[docs]</a><span class="k">class</span> <span class="nc">InplaceUpdate</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Updates specified rows with values in `v`.</span>

<span class="sd">    Args:</span>
<span class="sd">        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x</span>
<span class="sd">            to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:</span>
<span class="sd">          float32, float16 and int32.</span>
<span class="sd">        - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except</span>
<span class="sd">          the first dimension, which must be the same as the size of `indices`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, with the same type and shape as the input `x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `indices` is neither int nor tuple.</span>
<span class="sd">        TypeError: If `indices` is a tuple and its element is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; indices = (0, 1)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; inplace_update = ops.InplaceUpdate(indices)</span>
<span class="sd">        &gt;&gt;&gt; output = inplace_update(x, v)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[0.5 1. ]</span>
<span class="sd">         [1.  1.5]</span>
<span class="sd">         [5.  6. ]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize InplaceUpdate&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;v&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">indices</span> <span class="o">=</span> <span class="n">indices</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;indices&quot;</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">indices</span> <span class="o">=</span> <span class="p">(</span><span class="n">indices</span><span class="p">,)</span>
        <span class="k">for</span> <span class="n">item</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">indices</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;item of indices&quot;</span><span class="p">,</span> <span class="n">item</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">v_dtype</span><span class="p">):</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;x&#39;</span><span class="p">:</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="s1">&#39;v&#39;</span><span class="p">:</span> <span class="n">v_dtype</span><span class="p">}</span>
        <span class="n">valid_type</span> <span class="o">=</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">valid_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_dtype</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">v_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">),</span> <span class="s2">&quot;v&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">v_shape</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;size of indices&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">indices</span><span class="p">),</span> <span class="s2">&quot;v&#39;s first dimension&quot;</span><span class="p">,</span> <span class="n">v_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
                        <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">indices</span><span class="p">:</span>
            <span class="k">if</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="mi">0</span> <span class="ow">or</span> <span class="n">i</span> <span class="o">&gt;=</span> <span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the value of indices must be in [0, </span><span class="si">{</span><span class="n">x_shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="si">}</span><span class="s2">), &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">i</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">x_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">x_shape</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">x_rank</span><span class="p">)[</span><span class="mi">1</span><span class="p">:]:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s1">&#39;v dim </span><span class="si">%d</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="n">idx</span><span class="p">,</span> <span class="n">v_shape</span><span class="p">[</span><span class="n">idx</span><span class="p">],</span> <span class="s2">&quot;x dim </span><span class="si">%d</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="n">idx</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">[</span><span class="n">idx</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x_shape</span></div>


<div class="viewcode-block" id="ReverseSequence"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ReverseSequence.html#mindspore.ops.ReverseSequence">[docs]</a><span class="k">class</span> <span class="nc">ReverseSequence</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Reverses variable length slices.</span>

<span class="sd">    Args:</span>
<span class="sd">        seq_dim (int): The dimension where reversal is performed. Required.</span>
<span class="sd">        batch_dim (int): The input is sliced in this dimension. Default: 0.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The input to reverse, supporting all number types including bool.</span>
<span class="sd">        - **seq_lengths** (Tensor) - Must be a 1-D vector with int32 or int64 types.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Reversed tensor with the same shape and data type as input.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `seq_dim` or `batch_dim` is not an int.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; seq_lengths = Tensor(np.array([1, 2, 3]))</span>
<span class="sd">        &gt;&gt;&gt; reverse_sequence = ops.ReverseSequence(seq_dim=1)</span>
<span class="sd">        &gt;&gt;&gt; output = reverse_sequence(x, seq_lengths)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 2. 3.]</span>
<span class="sd">         [5. 4. 6.]</span>
<span class="sd">         [9. 8. 7.]]</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; seq_lengths = Tensor(np.array([1, 2, 3]))</span>
<span class="sd">        &gt;&gt;&gt; reverse_sequence = ops.ReverseSequence(seq_dim=0, batch_dim=1)</span>
<span class="sd">        &gt;&gt;&gt; output = reverse_sequence(x, seq_lengths)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 5. 9.]</span>
<span class="sd">         [4. 2. 6.]</span>
<span class="sd">         [7. 8. 3.]]</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; seq_lengths = Tensor(np.array([2, 2, 3]))</span>
<span class="sd">        &gt;&gt;&gt; reverse_sequence = ops.ReverseSequence(seq_dim=1)</span>
<span class="sd">        &gt;&gt;&gt; output = reverse_sequence(x, seq_lengths)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[2. 1. 3.]</span>
<span class="sd">         [5. 4. 6.]</span>
<span class="sd">         [9. 8. 7.]]</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; seq_lengths = Tensor(np.array([3, 2, 3]))</span>
<span class="sd">        &gt;&gt;&gt; reverse_sequence = ops.ReverseSequence(seq_dim=1)</span>
<span class="sd">        &gt;&gt;&gt; output = reverse_sequence(x, seq_lengths)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[3. 2. 1.]</span>
<span class="sd">         [5. 4. 6.]</span>
<span class="sd">         [9. 8. 7.]]</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; seq_lengths = Tensor(np.array([4, 4]))</span>
<span class="sd">        &gt;&gt;&gt; reverse_sequence = ops.ReverseSequence(seq_dim=1)</span>
<span class="sd">        &gt;&gt;&gt; output = reverse_sequence(x, seq_lengths)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[4. 3. 2. 1.]</span>
<span class="sd">         [8. 7. 6. 5.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">seq_dim</span><span class="p">,</span> <span class="n">batch_dim</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ReverseSequence&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;seq_lengths&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;seq_dim&quot;</span><span class="p">,</span> <span class="n">seq_dim</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">seq_dim_</span> <span class="o">=</span> <span class="n">seq_dim</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;batch_dim&quot;</span><span class="p">,</span> <span class="n">batch_dim</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">batch_dim_</span> <span class="o">=</span> <span class="n">batch_dim</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">seq_lengths</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;seq_dim&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">seq_dim_</span><span class="p">,</span> <span class="s2">&quot;x rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">LE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;batch_dim&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">batch_dim_</span><span class="p">,</span> <span class="s2">&quot;x rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="p">),</span> <span class="n">Rel</span><span class="o">.</span><span class="n">LE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;batch_dim&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">batch_dim_</span><span class="p">,</span> <span class="s2">&quot;seq_dim&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">seq_dim_</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">NE</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;seq_lengths rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">seq_lengths</span><span class="p">),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;seq_lengths vector size&quot;</span><span class="p">,</span> <span class="n">seq_lengths</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
                        <span class="s2">&quot;input size along batch_dim&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">batch_dim_</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">seq_lengths</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;x_dtype&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span> <span class="o">+</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;seq_lengths_dtype&quot;</span><span class="p">,</span> <span class="n">seq_lengths</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span></div>


<div class="viewcode-block" id="EditDistance"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.EditDistance.html#mindspore.ops.EditDistance">[docs]</a><span class="k">class</span> <span class="nc">EditDistance</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Computes the Levenshtein Edit Distance. It is used to measure the similarity of two sequences. The inputs are</span>
<span class="sd">    variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape)</span>
<span class="sd">    and (truth_indices, truth_values, truth_shape).</span>

<span class="sd">    .. math::</span>

<span class="sd">        \operatorname{lev}_{a, b}(i, j)=\left\{\begin{array}{ll}</span>
<span class="sd">        \max (i, j)  \qquad \qquad \qquad \qquad \qquad \quad \  \text { if } \min (i, j)=0 \\</span>
<span class="sd">        \min \left\{\begin{array}{ll}</span>
<span class="sd">        \operatorname{lev}_{a, b}(i-1, j)+1 &amp; \\</span>
<span class="sd">        \operatorname{lev}_{a, b}(i, j-1)+1 &amp; \text { otherwise. } \\</span>
<span class="sd">        \operatorname{lev}_{a, b}(i-1, j-1)+1_{\left(a_{i} \neq b_{j}\right)}</span>
<span class="sd">        \end{array}\right. &amp;</span>
<span class="sd">        \end{array}\right.</span>

<span class="sd">    Where the :math:`a` indicates the hypothesis and the :math:`b` indicates the truth. For ease of understanding,</span>
<span class="sd">    i and j herein may be considered as lengths of a and b.</span>

<span class="sd">    Args:</span>
<span class="sd">        normalize (bool): If true, edit distances are normalized by length of truth. Default: True.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **hypothesis_indices** (Tensor) - The indices of the hypothesis list SparseTensor. With int64 data type.</span>
<span class="sd">          The shape of tensor is :math:`(N, R)`.</span>
<span class="sd">        - **hypothesis_values** (Tensor) - The values of the hypothesis list SparseTensor. With float32 data type.</span>
<span class="sd">          Must be 1-D vector with length of N.</span>
<span class="sd">        - **hypothesis_shape** (Tensor) - The shape of the hypothesis list SparseTensor.</span>
<span class="sd">          Must be R-length vector with int64 data type. Only constant value is allowed.</span>
<span class="sd">        - **truth_indices** (Tensor) - The indices of the truth list SparseTensor. With int64 data type.</span>
<span class="sd">          The shape of tensor is :math:`(M, R)`.</span>
<span class="sd">        - **truth_values** (Tensor) - The values of the truth list SparseTensor. Must be 1-D vector with length of M.</span>
<span class="sd">          With float32 data type.</span>
<span class="sd">        - **truth_shape** (Tensor) - The shape of the truth list SparseTensor.</span>
<span class="sd">          Must be R-length vector with int64 data type. Only constant value is allowed.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a dense tensor with rank `R-1` and float32 data type.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `normalize` is not a bool.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import context</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; import mindspore.nn as nn</span>
<span class="sd">        &gt;&gt;&gt; import mindspore.ops as ops</span>
<span class="sd">        &gt;&gt;&gt; class EditDistance(nn.Cell):</span>
<span class="sd">        ...     def __init__(self, hypothesis_shape, truth_shape, normalize=True):</span>
<span class="sd">        ...         super(EditDistance, self).__init__()</span>
<span class="sd">        ...         self.edit_distance = ops.EditDistance(normalize)</span>
<span class="sd">        ...         self.hypothesis_shape = hypothesis_shape</span>
<span class="sd">        ...         self.truth_shape = truth_shape</span>
<span class="sd">        ...</span>
<span class="sd">        ...     def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):</span>
<span class="sd">        ...         return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,</span>
<span class="sd">        ...                                   truth_indices, truth_values, self.truth_shape)</span>
<span class="sd">        ...</span>
<span class="sd">        &gt;&gt;&gt; hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64))</span>
<span class="sd">        &gt;&gt;&gt; hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64))</span>
<span class="sd">        &gt;&gt;&gt; truth_indices = Tensor(np.array([[0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]).astype(np.int64))</span>
<span class="sd">        &gt;&gt;&gt; truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32))</span>
<span class="sd">        &gt;&gt;&gt; truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))</span>
<span class="sd">        &gt;&gt;&gt; edit_distance = EditDistance(hypothesis_shape, truth_shape)</span>
<span class="sd">        &gt;&gt;&gt; output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1. 1.]</span>
<span class="sd">         [1. 1.]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">normalize</span><span class="o">=</span><span class="kc">True</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize EditDistance&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">normalize</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;normalize&quot;</span><span class="p">,</span> <span class="n">normalize</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">set_const_input_indexes</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span> <span class="mi">5</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">h_indices</span><span class="p">,</span> <span class="n">h_values</span><span class="p">,</span> <span class="n">h_shape</span><span class="p">,</span> <span class="n">truth_indices</span><span class="p">,</span> <span class="n">truth_values</span><span class="p">,</span> <span class="n">truth_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_valid_input</span><span class="p">(</span><span class="s1">&#39;hypothesis_shape&#39;</span><span class="p">,</span> <span class="n">h_shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_valid_input</span><span class="p">(</span><span class="s1">&#39;truth_shape&#39;</span><span class="p">,</span> <span class="n">truth_shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args_int</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;hypothesis_indices&quot;</span><span class="p">:</span> <span class="n">h_indices</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="s2">&quot;hypothesis_shape&quot;</span><span class="p">:</span> <span class="n">h_shape</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
                    <span class="s2">&quot;truth_indices&quot;</span><span class="p">:</span> <span class="n">truth_indices</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="s2">&quot;truth_shape&quot;</span><span class="p">:</span> <span class="n">truth_shape</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args_int</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;hypothesis_values&quot;</span><span class="p">:</span> <span class="n">h_values</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="s2">&quot;truth_values&quot;</span><span class="p">:</span> <span class="n">truth_values</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

        <span class="n">hypothesis_indices_shp</span><span class="p">,</span> <span class="n">truth_indices_shp</span> <span class="o">=</span> <span class="n">h_indices</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">],</span> <span class="n">truth_indices</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;hypothesis_indices rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">hypothesis_indices_shp</span><span class="p">),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;truth_indices rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">truth_indices_shp</span><span class="p">),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;hypothesis_values rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">h_values</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;hypothesis_shape rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">h_shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;truth_values rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">truth_values</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;truth_shape rank&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">truth_shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]),</span> <span class="s2">&quot;expected&quot;</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;hypothesis_values shape&quot;</span><span class="p">,</span> <span class="n">h_values</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
                        <span class="s2">&quot;hypothesis_indices shape[0]&quot;</span><span class="p">,</span> <span class="n">hypothesis_indices_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;hypothesis_shape&quot;</span><span class="p">,</span> <span class="n">h_shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
                        <span class="s2">&quot;hypothesis_indices shape[1]&quot;</span><span class="p">,</span> <span class="n">hypothesis_indices_shp</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;truth_values shape&quot;</span><span class="p">,</span> <span class="n">truth_values</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
                        <span class="s2">&quot;truth_indices shape[0]&quot;</span><span class="p">,</span> <span class="n">truth_indices_shp</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;hypothesis_shape&quot;</span><span class="p">,</span> <span class="n">h_shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span>
                        <span class="s2">&quot;truth_shape&quot;</span><span class="p">,</span> <span class="n">truth_shape</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">][</span><span class="mi">0</span><span class="p">],</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">hypothesis_shape_v</span> <span class="o">=</span> <span class="n">h_shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span>
        <span class="n">truth_shape_v</span> <span class="o">=</span> <span class="n">truth_shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span>
        <span class="n">out_shape_rank</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">hypothesis_shape_v</span><span class="p">)</span> <span class="o">-</span> <span class="mi">1</span>
        <span class="n">out_shape</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">out_shape_rank</span><span class="p">):</span>
            <span class="n">out_shape</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">max</span><span class="p">(</span><span class="n">hypothesis_shape_v</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="n">truth_shape_v</span><span class="p">[</span><span class="n">i</span><span class="p">]))</span>

        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">out_shape</span><span class="p">),</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">),</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span></div>


<span class="k">class</span> <span class="nc">TransShape</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Transforms the shape of input tensor to target shape.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - A input tensor.</span>
<span class="sd">        - **out_shape** (tuple[int]) - The shape of output data.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a tensor whose data type is the same as &#39;input_x&#39;, and the shape is the same as the `out_shape`.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize TransShape.&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">__setattr_flag__</span> <span class="o">=</span> <span class="kc">True</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">shape</span><span class="p">):</span>
        <span class="n">shp</span> <span class="o">=</span> <span class="n">shape</span><span class="p">[</span><span class="s1">&#39;value&#39;</span><span class="p">]</span>
        <span class="n">dtype</span> <span class="o">=</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">]</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span> <span class="o">+</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;out_shape&#39;</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">shp</span><span class="p">))</span>
        <span class="k">return</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">shp</span><span class="p">,</span>
                <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">dtype</span><span class="p">,</span>
                <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>


<div class="viewcode-block" id="Sort"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.Sort.html#mindspore.ops.Sort">[docs]</a><span class="k">class</span> <span class="nc">Sort</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Sorts the elements of the input tensor along a given dimension in ascending order by value.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): The dimension to sort along. Default: -1.</span>
<span class="sd">        descending (bool): Controls the sorting order. If descending is True then the elements</span>
<span class="sd">            are sorted in descending order by value. Default: False.</span>

<span class="sd">    .. warning::</span>
<span class="sd">        Currently, only the data type of Float16 is supported. If Float32 is used, it may cause loss</span>
<span class="sd">        of accuracy.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The input to sort, with float16 or float32 data type.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        - **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.</span>
<span class="sd">        - **y2** (Tensor) - The indices of the elements in the original input tensor. Data type is int32.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `axis` is not an int.</span>
<span class="sd">        TypeError: If `descending` is not a bool.</span>
<span class="sd">        TypeError: If dtype of `x` is neither float16 nor float32.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)</span>
<span class="sd">        &gt;&gt;&gt; sort = ops.Sort()</span>
<span class="sd">        &gt;&gt;&gt; output = sort(x)</span>
<span class="sd">        &gt;&gt;&gt; # The output below is based on the Ascend platform.</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[3, 3], dtype=Float16, value=</span>
<span class="sd">        [[ 1.0000e+00,  2.0000e+00,  8.0000e+00],</span>
<span class="sd">         [ 3.0000e+00,  5.0000e+00,  9.0000e+00],</span>
<span class="sd">         [ 4.0000e+00,  6.0000e+00,  7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=</span>
<span class="sd">        [[2, 1, 0],</span>
<span class="sd">         [2, 0, 1],</span>
<span class="sd">         [0, 1, 2]]))</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=-</span><span class="mi">1</span><span class="p">,</span> <span class="n">descending</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Sort&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">axis</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">descending</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;descending&quot;</span><span class="p">,</span> <span class="n">descending</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y1&#39;</span><span class="p">,</span> <span class="s1">&#39;y2&#39;</span><span class="p">])</span></div>


<span class="k">class</span> <span class="nc">EmbeddingLookup</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a slice of input tensor based on the specified indices.</span>

<span class="sd">    This Primitive has the similar functionality as GatherV2 operating on `axis = 0`, but has one more inputs:</span>
<span class="sd">    `offset`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">          This represents a Tensor slice, instead of the entire Tensor. Currently, the dimension is restricted to be 2.</span>
<span class="sd">        - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.</span>
<span class="sd">          Specifies the indices of elements of the original Tensor. Values can be out of range of `input_params`,</span>
<span class="sd">          and the exceeding part will be filled with 0 in the output. Values do not support negative and the result</span>
<span class="sd">          is undefined if values are negative. The data type should be int32 or int64.</span>
<span class="sd">        - **offset** (int) - Specifies the offset value of this `input_params` slice. Thus the real indices</span>
<span class="sd">          are equal to `input_indices` minus `offset`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`. The data type is the same with `input_params`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `input_indices` is not int.</span>
<span class="sd">        ValueError: If length of shape of `input_params` is greater than 2.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; offset = 4</span>
<span class="sd">        &gt;&gt;&gt; output = ops.EmbeddingLookup()(input_params, input_indices, offset)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[[10. 11.]</span>
<span class="sd">          [ 0.  0.]]</span>
<span class="sd">         [[ 0.  0.]</span>
<span class="sd">          [10. 11.]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize EmbeddingLookup.&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">__setattr_flag__</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;params&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;offset&#39;</span><span class="p">],</span>
                                <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">__check__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">params</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">offset</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;params&quot;</span><span class="p">,</span> <span class="n">params</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s2">&quot;indices&quot;</span><span class="p">,</span> <span class="n">indices</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;offset&quot;</span><span class="p">,</span> <span class="n">offset</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int_</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">indices_shp</span> <span class="o">=</span> <span class="n">indices</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">indices_shp</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;input_indices&#39; should not &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;be zero, but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">indices_shp</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">params_shp</span> <span class="o">=</span> <span class="n">params</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">]</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">params_shp</span><span class="p">)</span> <span class="o">&gt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;input_params&#39; must &lt;= 2, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">params_shp</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">GatherD</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Gathers values along an axis specified by dim.</span>

<span class="sd">    For a 3-D tensor, the output is:</span>

<span class="sd">    .. code-block::</span>

<span class="sd">        output[i][j][k] = x[index[i][j][k]][j][k]  # if dim == 0</span>

<span class="sd">        output[i][j][k] = x[i][index[i][j][k]][k]  # if dim == 1</span>

<span class="sd">        output[i][j][k] = x[i][j][index[i][j][k]]  # if dim == 2</span>

<span class="sd">    If `x` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})` and `dim` = i,</span>
<span class="sd">    the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`</span>
<span class="sd">    where `y`&gt;=1 and the output will have the same shape as `index`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The source tensor.</span>
<span class="sd">          The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.</span>
<span class="sd">        - **dim** (int) - The axis along which to index. It must be int32 or int64. Only constant value is allowed.</span>
<span class="sd">        - **index** (Tensor) - The indices of elements to gather. It can be one of the following data types:</span>
<span class="sd">          int32, int64. The value range of each index element is [-x_rank[dim], x_rank[dim]).</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`, has the same data type with `x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `dim` or `index` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `x` is not equal to length of shape of `index`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; dim = 1</span>
<span class="sd">        &gt;&gt;&gt; output = ops.GatherD()(x, dim, index)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[1 1]</span>
<span class="sd">         [4 3]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize GatherD&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;dim&#39;</span><span class="p">,</span> <span class="s1">&#39;index&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>


<span class="k">class</span> <span class="nc">Identity</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a Tensor with the same shape and contents as input.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, the shape of tensor and the data type are the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Identity()(x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [1 2 3 4]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="c1"># Side effect is identity with input.</span>
    <span class="n">side_effect_propagate</span> <span class="o">=</span> <span class="mi">1</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize identity&quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;side_effect_propagate&#39;</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">__infer__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_subclass</span><span class="p">(</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span> <span class="o">+</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">,),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;shape&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;shape&#39;</span><span class="p">],</span>
               <span class="s1">&#39;dtype&#39;</span><span class="p">:</span> <span class="n">x</span><span class="p">[</span><span class="s1">&#39;dtype&#39;</span><span class="p">],</span>
               <span class="s1">&#39;value&#39;</span><span class="p">:</span> <span class="kc">None</span><span class="p">}</span>
        <span class="k">return</span> <span class="n">out</span>


<span class="k">class</span> <span class="nc">Range</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a sequence of numbers that begins at `start` and extends by increments of</span>
<span class="sd">    `delta` up to but not including `limit`.</span>

<span class="sd">    The types of all 3 inputs must be the same. The type of the resulting tensor is</span>
<span class="sd">    the same as the type of the inputs.</span>

<span class="sd">    Args:</span>
<span class="sd">        maxlen (int): Memory that can fit `maxlen` many elements</span>
<span class="sd">            will be allocated for the output. Optional, must be positive, defaults to 1000000.</span>
<span class="sd">            If the output has more than `maxlen` elements, a runtime error</span>
<span class="sd">            will occur.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **start** (Tensor) - A scalar Tensor. The first number in the sequence. Must have</span>
<span class="sd">          type: int32 or float32</span>
<span class="sd">        - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive. Must</span>
<span class="sd">          have type: int32 or float32</span>
<span class="sd">        - **delta** (Tensor) - A scalar Tensor. Number that increments `start`. Must have</span>
<span class="sd">          type: int32 or float32</span>

<span class="sd">    Outputs:</span>
<span class="sd">       A 1-D Tensor, with the same type as the inputs.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; start = Tensor(0, mstype.int32)</span>
<span class="sd">        &gt;&gt;&gt; limit = Tensor(10, mstype.int32)</span>
<span class="sd">        &gt;&gt;&gt; delta = Tensor(4, mstype.int32)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.Range()(start, limit, delta)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [0, 4, 8]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">maxlen</span><span class="o">=</span><span class="mi">1000000</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;start&#39;</span><span class="p">,</span> <span class="s1">&#39;limit&#39;</span><span class="p">,</span> <span class="s1">&#39;delta&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;maxlen&quot;</span><span class="p">,</span> <span class="n">maxlen</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">maxlen</span><span class="p">,</span> <span class="s2">&quot;maxlen&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">maxlen</span> <span class="o">=</span> <span class="n">maxlen</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s1">&#39;maxlen&#39;</span><span class="p">,</span> <span class="n">maxlen</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">check_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">start_shape</span><span class="p">,</span> <span class="n">limit_shape</span><span class="p">,</span> <span class="n">delta_shape</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;start_shape&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">start_shape</span><span class="p">),</span> <span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;limit_shape&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">limit_shape</span><span class="p">),</span> <span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check</span><span class="p">(</span><span class="s2">&quot;delta_shape&quot;</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">delta_shape</span><span class="p">),</span> <span class="s2">&quot;&quot;</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">check_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">start_dtype</span><span class="p">,</span> <span class="n">limit_dtype</span><span class="p">,</span> <span class="n">delta_dtype</span><span class="p">):</span>
        <span class="n">valid_dtypes</span> <span class="o">=</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">]</span>
        <span class="n">inputs</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;start&quot;</span><span class="p">:</span> <span class="n">start_dtype</span><span class="p">,</span> <span class="s2">&quot;limit&quot;</span><span class="p">:</span> <span class="n">limit_dtype</span><span class="p">,</span> <span class="s2">&quot;delta&quot;</span><span class="p">:</span> <span class="n">delta_dtype</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">inputs</span><span class="p">,</span> <span class="n">valid_dtypes</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">infer_value</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">start_value</span><span class="p">,</span> <span class="n">limit_value</span><span class="p">,</span> <span class="n">delat_value</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Infer the value of input for Range.&quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="n">start_value</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">limit_value</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">delat_value</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">start</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asscalar</span><span class="p">(</span><span class="n">start_value</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">())</span>
            <span class="n">limit</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asscalar</span><span class="p">(</span><span class="n">limit_value</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">())</span>
            <span class="n">delat</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asscalar</span><span class="p">(</span><span class="n">delat_value</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">())</span>
            <span class="k">return</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="n">start</span><span class="p">,</span> <span class="n">limit</span><span class="p">,</span> <span class="n">delat</span><span class="p">),</span> <span class="n">dtype</span><span class="o">=</span><span class="n">start_value</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
        <span class="k">return</span> <span class="kc">None</span>


<span class="k">class</span> <span class="nc">MaskedFill</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Fills elements of self tensor with value where mask is True.</span>

<span class="sd">    The shapes of `input` and `mask` need to be the same or broadcast.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input** (Tensor) - The source tensor whose data type is one of float16, float32, int8, int32.</span>
<span class="sd">        - **mask** (Tensor[bool]) - The boolean mask.</span>
<span class="sd">        - **value** (Union[float, Tensor]) – The value to fill in with, which only supports</span>
<span class="sd">          a 0-dimensional tensor or a float number.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same type and shape as `input`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input` or `mask` is not a tensor.</span>
<span class="sd">        TypeError: If `value` is neither float number nor tensor.</span>
<span class="sd">        TypeError: If dtype of `input` or `value` is not one of float16, float32, int8, int32.</span>
<span class="sd">        TypeError: If dtype of `value` is different from that of `input`.</span>
<span class="sd">        TypeError: If dtype of `mask` is not bool.</span>
<span class="sd">        ValueError: If the shapes of `input` and `mask` could not be broadcast.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.MaskedFill()(input, mask, 0.5)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [0.5 0.5 3.  0.5]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input&#39;</span><span class="p">,</span> <span class="s1">&#39;mask&#39;</span><span class="p">,</span> <span class="s1">&#39;value&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>


<span class="k">class</span> <span class="nc">MaskedSelect</span><span class="p">(</span><span class="n">PrimitiveWithCheck</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask.</span>
<span class="sd">    The shapes of the mask tensor and the input tensor don&#39;t need to match, but they must be broadcastable.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>
<span class="sd">        - **mask** (Tensor[bool]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        A 1-D Tensor, with the same type as x.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `x` is not a Tensor.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)</span>
<span class="sd">        &gt;&gt;&gt; mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.MaskedSelect()(x, mask)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [1 3]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="s1">&#39;mask&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">check_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_shape</span><span class="p">,</span> <span class="n">mask_shape</span><span class="p">):</span>
        <span class="n">get_broadcast_shape</span><span class="p">(</span><span class="n">x_shape</span><span class="p">,</span> <span class="n">mask_shape</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">arg_name1</span><span class="o">=</span><span class="s2">&quot;x&quot;</span><span class="p">,</span> <span class="n">arg_name2</span><span class="o">=</span><span class="s2">&quot;mask&quot;</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">check_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mask_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;mask&#39;</span><span class="p">,</span> <span class="n">mask_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">bool_</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;x&#39;</span><span class="p">,</span> <span class="n">x_dtype</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">SearchSorted</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Find the indices from the innermost dimension of `sequence` such that the order of the innermost dimension</span>
<span class="sd">    within `sequence` would be preserved when the corresponding values in `values` were inserted before the indices.</span>

<span class="sd">    Args:</span>
<span class="sd">        out_int32 (bool): Output datatype. Optional. If True, the output datatype will be int32;</span>
<span class="sd">                          if False, the output datatype will be int64. Default is False.</span>
<span class="sd">        right (bool): Search Strategy. Optional. If True, return the last suitable index found.</span>
<span class="sd">                      If False, return the first such index. Default is False.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **sequence** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_R)` or `(x_1)`.</span>
<span class="sd">                                  It must contain a monotonically increasing sequence on the innermost dimension.</span>
<span class="sd">        - **values** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor containing the indices from the innermost dimension of the input sequence such that,</span>
<span class="sd">        if insert the corresponding value in the values tensor, the order of the tensor sequence would be preserved.</span>
<span class="sd">        The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`,</span>
<span class="sd">        whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as the shape of values.</span>

<span class="sd">    Raises:</span>
<span class="sd">        ValueError: If `sequence` and `values` do not have proper shapes.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``CPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = ops.SearchSorted()(sequence, values)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[2 4 5]</span>
<span class="sd">         [1 2 4]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">out_int32</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">right</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SearchSorted&quot;&quot;&quot;</span>
        <span class="c1"># Both attributes are validated as plain Python bools at construction time.</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_int32</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;out_int32&quot;</span><span class="p">,</span> <span class="n">out_int32</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">right</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;right&quot;</span><span class="p">,</span> <span class="n">right</span><span class="p">,</span> <span class="p">[</span><span class="nb">bool</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;sequence&#39;</span><span class="p">,</span> <span class="s1">&#39;values&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;positions&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">sequence_shape</span><span class="p">,</span> <span class="n">values_shape</span><span class="p">):</span>
        <span class="c1"># A 1-D sequence is shared by every row of values; otherwise all leading</span>
        <span class="c1"># (batch) dimensions of sequence and values must match exactly.</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">sequence_shape</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">1</span> <span class="ow">and</span> <span class="n">sequence_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">!=</span> <span class="n">values_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the &#39;sequence&#39; should be 1 dimensional or &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;all dimensions except the last dimension of &#39;sequence&#39; &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;must be the same as all dimensions except the last dimension of &#39;values&#39;. &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got shape of &#39;sequence&#39;: </span><span class="si">{</span><span class="n">sequence_shape</span><span class="si">}</span><span class="s2"> &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;and shape of &#39;values&#39;: </span><span class="si">{</span><span class="n">values_shape</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="c1"># The output carries one index per entry of values, so it keeps values&#39; shape.</span>
        <span class="k">return</span> <span class="n">values_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">sequence_dtype</span><span class="p">,</span> <span class="n">values_dtype</span><span class="p">):</span>
        <span class="c1"># sequence and values must share one numeric dtype; the result dtype depends</span>
        <span class="c1"># only on the out_int32 attribute chosen at construction time.</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;sequence_dtype&quot;</span><span class="p">:</span> <span class="n">sequence_dtype</span><span class="p">,</span> <span class="s2">&quot;values_dtype&quot;</span><span class="p">:</span> <span class="n">values_dtype</span><span class="p">}</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">number_type</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_int32</span> <span class="k">else</span> <span class="n">mstype</span><span class="o">.</span><span class="n">tensor_type</span><span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">)</span>


<div class="viewcode-block" id="TensorScatterMax"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.TensorScatterMax.html#mindspore.ops.TensorScatterMax">[docs]</a><span class="k">class</span> <span class="nc">TensorScatterMax</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    By comparing the value at the position indicated by the index in input_x with the value in the update,</span>
<span class="sd">    the value at the index will eventually be equal to the largest one to create a new tensor.</span>

<span class="sd">    The last axis of the index is the depth of each index vector. For each index vector,</span>
<span class="sd">    there must be a corresponding value in `updates`. The shape of `updates` should be</span>
<span class="sd">    equal to the shape of input_x[indices].</span>
<span class="sd">    For more details, see use cases.</span>

<span class="sd">    Note:</span>
<span class="sd">        If some values of the `indices` are out of bound, instead of raising an index error,</span>
<span class="sd">        the corresponding `updates` will not be updated to `input_x`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.</span>
<span class="sd">          The rank must be at least 2.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,</span>
<span class="sd">          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; # Next, demonstrate the approximate operation process of this operator:</span>
<span class="sd">        &gt;&gt;&gt; # 1, indices[0] = [0, 0], indices[1] = [0, 0]</span>
<span class="sd">        &gt;&gt;&gt; # 2, And input_x[0, 0] = -0.1</span>
<span class="sd">        &gt;&gt;&gt; # 3, So input_x[indices] = [-0.1, -0.1]</span>
<span class="sd">        &gt;&gt;&gt; # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.TensorScatterMax()</span>
<span class="sd">        &gt;&gt;&gt; # 5, Perform the max operation for the first time:</span>
<span class="sd">        &gt;&gt;&gt; #      first_input_x = Max(input_x[0][0], updates[0]) = [[1.0, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; # 6, Perform the max operation for the second time:</span>
<span class="sd">        &gt;&gt;&gt; #      second_input_x = Max(first_input_x[0][0], updates[1]) = [[2.2, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 2.2  0.3  3.6]</span>
<span class="sd">         [ 0.4  0.5 -3.2]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="c1"># indices must be at least 2-D: its last axis holds one index vector per update.</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;indices&#39; cannot be less than 2,&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="c1"># An index vector cannot address more dimensions than input_x has.</span>
        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">&gt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the last dimension of &#39;indices&#39; must be less than or equal to &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;last dimension of &#39;indices&#39;: </span><span class="si">{</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="si">}</span><span class="s2"> and the dimension of &#39;input_x&#39;: &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="c1"># updates must supply exactly one slice of input_x per index vector.</span>
        <span class="n">updates_shape_check</span> <span class="o">=</span> <span class="n">indices_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">input_x_shape</span><span class="p">[</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:]</span>
        <span class="k">if</span> <span class="n">updates_shape_check</span> <span class="o">!=</span> <span class="n">updates_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;update&#39; must be equal to updates_shape_check, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got the shape of &#39;update&#39;: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape_check: </span><span class="si">{</span><span class="n">updates_shape_check</span><span class="si">}</span><span class="s2">, indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2"> and &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;input_x_shape: </span><span class="si">{</span><span class="n">input_x_shape</span><span class="si">}</span><span class="s2">. Please check input_x_shape and indices_shape.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="n">input_x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="c1"># indices are integer positions; input_x and updates must share a supported dtype.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;input_x&quot;</span><span class="p">:</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">valid_input_types</span> <span class="o">=</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">uint8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">valid_input_types</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">input_x_dtype</span></div>


<div class="viewcode-block" id="TensorScatterMin"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.TensorScatterMin.html#mindspore.ops.TensorScatterMin">[docs]</a><span class="k">class</span> <span class="nc">TensorScatterMin</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    By comparing the value at the position indicated by the index in input_x with the value in the `updates`,</span>
<span class="sd">    the value at the index will eventually be equal to the smallest one to create a new tensor.</span>

<span class="sd">    The last axis of the index is the depth of each index vector. For each index vector,</span>
<span class="sd">    there must be a corresponding value in `updates`. The shape of `updates` should be</span>
<span class="sd">    equal to the shape of input_x[indices].</span>
<span class="sd">    For more details, see use cases.</span>

<span class="sd">    Note:</span>
<span class="sd">        If some values of the `indices` are out of bound, instead of raising an index error,</span>
<span class="sd">        the corresponding `updates` will not be updated to `input_x`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.</span>
<span class="sd">          The rank must be at least 2.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,</span>
<span class="sd">          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; # Next, demonstrate the approximate operation process of this operator:</span>
<span class="sd">        &gt;&gt;&gt; # 1, indices[0] = [0, 0], indices[1] = [0, 0]</span>
<span class="sd">        &gt;&gt;&gt; # 2, And input_x[0, 0] = -0.1</span>
<span class="sd">        &gt;&gt;&gt; # 3, So input_x[indices] = [-0.1, -0.1]</span>
<span class="sd">        &gt;&gt;&gt; # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.TensorScatterMin()</span>
<span class="sd">        &gt;&gt;&gt; # 5, Perform the min operation for the first time:</span>
<span class="sd">        &gt;&gt;&gt; #      first_input_x = Min(input_x[0][0], updates[0]) = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; # 6, Perform the min operation for the second time:</span>
<span class="sd">        &gt;&gt;&gt; #      second_input_x = Min(first_input_x[0][0], updates[1]) = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[-0.1  0.3  3.6]</span>
<span class="sd">         [ 0.4  0.5 -3.2]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="c1"># indices must be at least 2-D: its last axis holds one index vector per update.</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;indices&#39; cannot be less than 2,&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="c1"># An index vector cannot address more dimensions than input_x has.</span>
        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">&gt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the last dimension of &#39;indices&#39; must be less than or equal to &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;last dimension of &#39;indices&#39;: </span><span class="si">{</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="si">}</span><span class="s2"> and the dimension of &#39;input_x&#39;: &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="c1"># updates must supply exactly one slice of input_x per index vector.</span>
        <span class="n">updates_shape_check</span> <span class="o">=</span> <span class="n">indices_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">input_x_shape</span><span class="p">[</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:]</span>
        <span class="k">if</span> <span class="n">updates_shape_check</span> <span class="o">!=</span> <span class="n">updates_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;update&#39; must be equal to updates_shape_check, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got the shape of &#39;update&#39;: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape_check: </span><span class="si">{</span><span class="n">updates_shape_check</span><span class="si">}</span><span class="s2">, indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2"> and &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;input_x_shape: </span><span class="si">{</span><span class="n">input_x_shape</span><span class="si">}</span><span class="s2">. Please check input_x_shape and indices_shape.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="n">input_x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="c1"># indices are integer positions; input_x and updates must share a supported dtype.</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;input_x&quot;</span><span class="p">:</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">valid_input_types</span> <span class="o">=</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">uint8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">valid_input_types</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">input_x_dtype</span></div>


<div class="viewcode-block" id="TensorScatterSub"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.TensorScatterSub.html#mindspore.ops.TensorScatterSub">[docs]</a><span class="k">class</span> <span class="nc">TensorScatterSub</span><span class="p">(</span><span class="n">PrimitiveWithInfer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Creates a new tensor by subtracting the values from the positions in `input_x` indicated by</span>
<span class="sd">    `indices`, with values from `updates`. When multiple values are provided for the same</span>
<span class="sd">    index, the result of the update will be to subtract these values respectively. This operation is almost</span>
<span class="sd">    equivalent to using ScatterNdSub, except that the updates are applied on `Tensor` instead of `Parameter`.</span>

<span class="sd">    The last axis of `indices` is the depth of each index vectors. For each index vector,</span>
<span class="sd">    there must be a corresponding value in `updates`. The shape of `updates` should be</span>
<span class="sd">    equal to the shape of `input_x[indices]`. For more details, see use cases.</span>

<span class="sd">    Note:</span>
<span class="sd">        If some values of the `indices` are out of bound, instead of raising an index error,</span>
<span class="sd">        the corresponding `updates` will not be updated to `input_x`.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.</span>
<span class="sd">          The rank must be at least 2.</span>
<span class="sd">        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,</span>
<span class="sd">          and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is neither int32 nor int64.</span>
<span class="sd">        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; # Next, demonstrate the approximate operation process of this operator:</span>
<span class="sd">        &gt;&gt;&gt; # 1, indices[0] = [0, 0], indices[1] = [0, 0]</span>
<span class="sd">        &gt;&gt;&gt; # 2, And input_x[0, 0] = -0.1</span>
<span class="sd">        &gt;&gt;&gt; # 3, So input_x[indices] = [-0.1, -0.1]</span>
<span class="sd">        &gt;&gt;&gt; # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.TensorScatterSub()</span>
<span class="sd">        &gt;&gt;&gt; # 5, Perform the subtract operation for the first time:</span>
<span class="sd">        &gt;&gt;&gt; #      first_input_x = input_x[0][0] - updates[0] = [[-1.1, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; # 6, Perform the subtract operation for the second time:</span>
<span class="sd">        &gt;&gt;&gt; #      second_input_x = input_x[0][0] - updates[1] = [[-3.3, 0.3, 3.6], [0.4, 0.5, -3.2]]</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[-3.3000002  0.3        3.6      ]</span>
<span class="sd">         [ 0.4        0.5       -3.2      ]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>

    <span class="k">def</span> <span class="nf">infer_shape</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_shape</span><span class="p">,</span> <span class="n">indices_shape</span><span class="p">,</span> <span class="n">updates_shape</span><span class="p">):</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span> <span class="o">&lt;</span> <span class="mi">2</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the dimension of &#39;indices&#39; cannot be less than 2,&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">indices_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">&gt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the last dimension of &#39;indices&#39; must be less than or equal to &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;the dimension of &#39;input_x&#39;, but got the &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;last dimension of &#39;indices&#39;: </span><span class="si">{</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="si">}</span><span class="s2"> and the dimension of &#39;input_x&#39;: &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">input_x_shape</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="n">updates_shape_check</span> <span class="o">=</span> <span class="n">indices_shape</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">+</span> <span class="n">input_x_shape</span><span class="p">[</span><span class="n">indices_shape</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]:]</span>
        <span class="k">if</span> <span class="n">updates_shape_check</span> <span class="o">!=</span> <span class="n">updates_shape</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="s2">&#39;, the shape of &#39;update&#39; must be equal to updates_shape_check, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got the shape of &#39;update&#39;: </span><span class="si">{</span><span class="n">updates_shape</span><span class="si">}</span><span class="s2">, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;updates_shape_check: </span><span class="si">{</span><span class="n">updates_shape_check</span><span class="si">}</span><span class="s2">, indices_shape: </span><span class="si">{</span><span class="n">indices_shape</span><span class="si">}</span><span class="s2"> and &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;input_x_shape: </span><span class="si">{</span><span class="n">input_x_shape</span><span class="si">}</span><span class="s2">. Please check input_x_shape and indices_shape.&quot;</span><span class="p">)</span>

        <span class="k">return</span> <span class="n">input_x_shape</span>

    <span class="k">def</span> <span class="nf">infer_dtype</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="n">updates_dtype</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensor_dtype_valid</span><span class="p">(</span><span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="n">indices_dtype</span><span class="p">,</span> <span class="p">[</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int64</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">args</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;input_x&quot;</span><span class="p">:</span> <span class="n">input_x_dtype</span><span class="p">,</span> <span class="s2">&quot;updates&quot;</span><span class="p">:</span> <span class="n">updates_dtype</span><span class="p">}</span>
        <span class="n">valid_input_types</span> <span class="o">=</span> <span class="p">(</span><span class="n">mstype</span><span class="o">.</span><span class="n">float16</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">uint8</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_tensors_dtypes_same_and_valid</span><span class="p">(</span><span class="n">args</span><span class="p">,</span> <span class="n">valid_input_types</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">input_x_dtype</span></div>


<div class="viewcode-block" id="SplitV"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.SplitV.html#mindspore.ops.SplitV">[docs]</a><span class="k">class</span> <span class="nc">SplitV</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Splits the input tensor into num_split tensors along the given dimension.</span>

<span class="sd">    The `input_x` tensor will be split into sub-tensors with individual shapes given by `size_splits` along the split</span>
<span class="sd">    dimension. This requires that `input_x.shape(split_dim)` is equal to the sum of `size_splits`.</span>

<span class="sd">    The shape of `input_x` is :math:`(x_1, x_2, ..., x_M, ..., x_R)`. The rank of `input_x` is `R`. Set the given</span>
<span class="sd">    `split_dim` as M, and :math:`-R \le M &lt; R`. Set the given `num_split` as `N`, the given `size_splits` as</span>
<span class="sd">    :math:`(x_{m_1}, x_{m_2}, ..., x_{m_N})`, :math:`x_M=\sum_{i=1}^Nx_{m_i}`. The output is a list of tensor objects,</span>
<span class="sd">    for the :math:`i`-th tensor, it has the shape of :math:`(x_1, x_2, ..., x_{m_i}, ..., x_R)`. :math:`x_{m_i}` is the</span>
<span class="sd">    :math:`M`-th dimension of the :math:`i`-th tensor. Then, the shape of the output tensor is</span>

<span class="sd">    .. math::</span>

<span class="sd">        ((x_1, x_2, ..., x_{m_1}, ..., x_R), (x_1, x_2, ..., x_{m_2}, ..., x_R), ...,</span>
<span class="sd">         (x_1, x_2, ..., x_{m_N}, ..., x_R))</span>

<span class="sd">    Args:</span>
<span class="sd">        size_splits (Union[tuple, list]): The list containing the sizes of each output tensor along the split</span>
<span class="sd">                                          dimension. Must sum to the dimension of value along `split_dim`.</span>
<span class="sd">                                          Can contain one -1 indicating that dimension is to be inferred.</span>
<span class="sd">        split_dim (int): The dimension along which to split. Must be in the range [-len(input_x.shape),</span>
<span class="sd">                         len(input_x.shape)).</span>
<span class="sd">        num_split (int): The number of output tensors. Must be positive int.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ...,x_M ..., x_R)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, a list of `num_split` Tensor objects with the shape :math:`((x_1, x_2, ..., x_{m_1}, ..., x_R),</span>
<span class="sd">        (x_1, x_2, ..., x_{m_2}, ..., x_R), ..., (x_1, x_2, ..., x_{m_N}, ..., x_R))`, :math:`x_M=\sum_{i=1}^Nx_{m_i}`.</span>
<span class="sd">        The data type is the same with `input_x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `input_x` is not a Tensor.</span>
<span class="sd">        TypeError: If `size_splits` is not a tuple or a list.</span>
<span class="sd">        TypeError: If element of `size_splits` is not an int.</span>
<span class="sd">        TypeError: If `split_dim` or `num_split` is not an int.</span>
<span class="sd">        ValueError: If rank of the `size_splits` is not equal to `num_split`.</span>
<span class="sd">        ValueError: If sum of the `size_splits` is not equal to the dimension of value along `split_dim`.</span>
<span class="sd">        ValueError: If `split_dim` is out of the range [-len(input_x.shape), len(input_x.shape)).</span>
<span class="sd">        ValueError: If the `num_split` is less than or equal to 0.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.SplitV(size_splits=[1, -1], split_dim=1, num_split=2)</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[3, 1], dtype=Int32, value=</span>
<span class="sd">        [[1],</span>
<span class="sd">         [4],</span>
<span class="sd">         [7]]), Tensor(shape=[3, 2], dtype=Int32, value=</span>
<span class="sd">        [[2, 3],</span>
<span class="sd">         [5, 6],</span>
<span class="sd">         [8, 9]]))</span>
<span class="sd">        &gt;&gt;&gt; input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; op = ops.SplitV(size_splits=[2, 1], split_dim=0, num_split=2)</span>
<span class="sd">        &gt;&gt;&gt; output = op(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        (Tensor(shape=[2, 3], dtype=Int32, value=</span>
<span class="sd">        [[1, 2, 3],</span>
<span class="sd">         [4, 5, 6]]), Tensor(shape=[1, 3], dtype=Int32, value=</span>
<span class="sd">        [[7, 8, 9]]))</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">size_splits</span><span class="p">,</span> <span class="n">split_dim</span><span class="p">,</span> <span class="n">num_split</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize SplitV&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;size_splits&quot;</span><span class="p">,</span> <span class="n">size_splits</span><span class="p">,</span> <span class="p">[</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">elements_of_size_splits</span> <span class="ow">in</span> <span class="n">size_splits</span><span class="p">:</span>
            <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;elements of size_splits&quot;</span><span class="p">,</span> <span class="n">elements_of_size_splits</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">elements_of_size_splits</span> <span class="o">!=</span> <span class="o">-</span><span class="mi">1</span> <span class="ow">and</span> <span class="n">elements_of_size_splits</span> <span class="o">&lt;</span> <span class="mi">1</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For </span><span class="se">\&#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="si">}</span><span class="se">\&#39;</span><span class="s2">, all elements of size_splits must be positive (except at most &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;one default value -1), but got: </span><span class="si">{</span><span class="n">elements_of_size_splits</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;split_dim&quot;</span><span class="p">,</span> <span class="n">split_dim</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;num_split&quot;</span><span class="p">,</span> <span class="n">num_split</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">num_split</span><span class="p">,</span> <span class="s2">&quot;num_split&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;input_x&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;output&#39;</span><span class="p">])</span></div>


<span class="k">class</span> <span class="nc">ScatterElements</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    ScatterElements takes three inputs data, updates, and indices of the same rank r &gt;= 1</span>
<span class="sd">    and an optional attribute axis that identifies an axis of data (default is 0).</span>
<span class="sd">    The output of the operation is produced by creating a copy of the input data, and then updating its value to</span>
<span class="sd">    values specified by updates at specific index positions specified by indices.</span>

<span class="sd">    Args:</span>
<span class="sd">        axis (int): which axis to scatter, default is 0.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **data** (Tensor) - The target tensor.</span>
<span class="sd">        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.</span>
<span class="sd">        - **update** (Tensor) - The tensor to update the input tensor, has the same type as input,</span>
<span class="sd">          and update.shape should be equal to indices.shape.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same shape and type as `data`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of `indices` is neither int32 nor int64.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScatterElements(0)</span>
<span class="sd">        &gt;&gt;&gt; data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[1, 0, 2], [0, 2, 1]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[0, 0, 0], [0, 0, 0]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = op(data, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 0.0  0.0  3.0]</span>
<span class="sd">         [ 0.0  5.0  0.0]</span>
<span class="sd">         [ 7.0  0.0  0.0]]</span>
<span class="sd">        &gt;&gt;&gt; op = ops.ScatterElements(1)</span>
<span class="sd">        &gt;&gt;&gt; data = Tensor(np.array([[1, 2, 3, 4, 5]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; indices = Tensor(np.array([[2, 4]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; updates = Tensor(np.array([[8, 8]]), mindspore.int32)</span>
<span class="sd">        &gt;&gt;&gt; output = op(data, indices, updates)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 1  2  8  4  8]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ScatterElements&quot;&quot;&quot;</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;axis&quot;</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">init_prim_io_names</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;data&#39;</span><span class="p">,</span> <span class="s1">&#39;indices&#39;</span><span class="p">,</span> <span class="s1">&#39;updates&#39;</span><span class="p">],</span> <span class="n">outputs</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;y&#39;</span><span class="p">])</span>


<div class="viewcode-block" id="ExtractVolumePatches"><a class="viewcode-back" href="../../../../api_python/ops/mindspore.ops.ExtractVolumePatches.html#mindspore.ops.ExtractVolumePatches">[docs]</a><span class="k">class</span> <span class="nc">ExtractVolumePatches</span><span class="p">(</span><span class="n">Primitive</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Extract patches from input and put them in the &quot;depth&quot; output dimension. 3D extension of extract_image_patches.</span>

<span class="sd">    Args:</span>
<span class="sd">        kernel_size (Union[int, tuple[int], list[int]]): A list of ints which&#39;s length is 3 or 5.</span>
<span class="sd">            The size of the sliding window for each dimension of input. Must be: [1, 1, k_d, k_h, k_w] or</span>
<span class="sd">            [k_d, k_h, k_w]. If k_d = k_h = k_w, you can enter an integer.</span>
<span class="sd">        strides (Union[int, tuple[int], list[int]]): A list of ints which&#39;s length is 3 or 5.</span>
<span class="sd">            How far the centers of two consecutive patches are in input. Must be: [1, 1, s_d, s_h, s_w] or</span>
<span class="sd">            [s_d, s_h, s_w]. If s_d = s_h = s_w, you can enter an integer.</span>
<span class="sd">        padding (str): A string from: &quot;SAME&quot;, &quot;VALID&quot;. The type of padding algorithm to use.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **input_x** (Tensor) - A Tensor. Must be one of the following types: float16, float32.</span>
<span class="sd">          5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, has the same type as input.</span>
<span class="sd">        If padding is VALID, the shape is :math:`(x_n, k_d * k_h * k_w * x_c, 1 + (x_d - k_d) / s_d,</span>
<span class="sd">        1 + (x_h - k_h) / s_h, 1 + (x_w - k_w) / s_w)`; if padding is SAME, the shape is :math:`(</span>
<span class="sd">        x_n, k_d * k_h * k_w * x_c, (x_d + s_d - 1) / s_d, (x_h + s_h - 1) / s_h, (x_w + s_w - 1) / s_w)`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If dtype of input_x is neither float16 nor float32.</span>
<span class="sd">        TypeError: If kernel_size or strides is not a list, a tuple or an int.</span>
<span class="sd">        TypeError: If input_x is not a tensor.</span>
<span class="sd">        TypeError: If padding is not str.</span>
<span class="sd">        ValueError: If the length of kernel_size is neither 3 nor 5 and kernel_size is not an integer.</span>
<span class="sd">        ValueError: If the length of strides is neither 3 nor 5 and strides is not an integer.</span>
<span class="sd">        ValueError: If padding is neither &quot;VALID&quot; nor &quot;SAME&quot;.</span>
<span class="sd">        ValueError: If elements of kernel_size or strides are not positive integer.</span>
<span class="sd">        ValueError: If input_x is not a tensor in dimension 5.</span>
<span class="sd">        ValueError: If input_x&#39;s shape has zero.</span>
<span class="sd">        ValueError: If one of kernel_size or strides&#39; first two numbers is not 1.</span>
<span class="sd">        ValueError: If padding = &quot;VALID&quot; and input - kernel_size is less than 0 in d, h or w dimension.</span>
<span class="sd">        ValueError: If padding = &quot;SAME&quot; and :math:`padding_needed = ((input_x + strides - 1) / strides - 1) *</span>
<span class="sd">                    strides + kernel_size - input` is less than 0 in d, h or w dimension.</span>
<span class="sd">        ValueError: If x_h is not 1 or x_w is not 1 and x_w + padding_needed - k_w - s_w is less than 0.</span>
<span class="sd">        ValueError: If x_d * x_h * x_w is greater than 2048.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend``</span>

<span class="sd">    Example:</span>
<span class="sd">        &gt;&gt;&gt; kernel_size = (1, 1, 2, 2, 2)</span>
<span class="sd">        &gt;&gt;&gt; strides = (1, 1, 1, 1, 1)</span>
<span class="sd">        &gt;&gt;&gt; padding = &quot;VALID&quot;</span>
<span class="sd">        &gt;&gt;&gt; input_x = P.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))</span>
<span class="sd">        &gt;&gt;&gt; output_y = P.ExtractVolumePatches(kernel_size, strides, padding)(input_x)</span>
<span class="sd">        &gt;&gt;&gt; print(output_y.shape)</span>
<span class="sd">        (1, 8, 2, 2, 2)</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="nd">@prim_attr_register</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">kernel_size</span><span class="p">,</span> <span class="n">strides</span><span class="p">,</span> <span class="n">padding</span><span class="p">):</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;kernel_size&quot;</span><span class="p">,</span> <span class="n">kernel_size</span><span class="p">,</span> <span class="p">(</span><span class="nb">int</span><span class="p">,</span> <span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;strides&quot;</span><span class="p">,</span> <span class="n">strides</span><span class="p">,</span> <span class="p">(</span><span class="nb">int</span><span class="p">,</span> <span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
            <span class="n">kernel_size</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">)</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">)</span> <span class="o">==</span> <span class="mi">5</span><span class="p">:</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="s2">&quot;kernel_size[0]&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="s2">&quot;kernel_size[1]&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">strides</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
            <span class="n">strides</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">strides</span><span class="p">)</span>
            <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">strides</span><span class="p">)</span> <span class="o">==</span> <span class="mi">5</span><span class="p">:</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">strides</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="s2">&quot;strides[0]&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
                <span class="n">validator</span><span class="o">.</span><span class="n">check_int</span><span class="p">(</span><span class="n">strides</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="mi">1</span><span class="p">,</span> <span class="n">Rel</span><span class="o">.</span><span class="n">EQ</span><span class="p">,</span> <span class="s2">&quot;strides[1]&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span> <span class="o">=</span> <span class="n">_check_3d_int_or_tuple</span><span class="p">(</span><span class="s2">&quot;kernel_size&quot;</span><span class="p">,</span> <span class="n">kernel_size</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span>
                                                  <span class="n">allow_five</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">ret_five</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">greater_zero</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">strides</span> <span class="o">=</span> <span class="n">_check_3d_int_or_tuple</span><span class="p">(</span><span class="s2">&quot;strides&quot;</span><span class="p">,</span> <span class="n">strides</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">,</span>
                                              <span class="n">allow_five</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">ret_five</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">greater_zero</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;kernel_size&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;strides&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">strides</span><span class="p">)</span>
        <span class="n">validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;padding_dtype&quot;</span><span class="p">,</span> <span class="n">padding</span><span class="p">,</span> <span class="p">(</span><span class="nb">str</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">validator</span><span class="o">.</span><span class="n">check_string</span><span class="p">(</span><span class="n">padding</span><span class="o">.</span><span class="n">upper</span><span class="p">(),</span> <span class="p">[</span><span class="s1">&#39;VALID&#39;</span><span class="p">,</span> <span class="s1">&#39;SAME&#39;</span><span class="p">],</span> <span class="s1">&#39;padding&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add_prim_attr</span><span class="p">(</span><span class="s2">&quot;padding&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">)</span></div>
</pre></div>

           </div>
           
          </div>
          <footer>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>