

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" />
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  
  <title>mindspore.boost &mdash; MindSpore master documentation</title>
  

  
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />

  
  

  
  

  

  
  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/language_data.js"></script>
        <script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    
    <link rel="index" title="Index" href="../genindex.html" />
    <link rel="search" title="Search" href="../search.html" />
    <link rel="prev" title="mindspore.train" href="mindspore.train.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html" class="icon icon-home"> MindSpore
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">mindspore.boost</a></li>
</ul>
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html" class="icon icon-home"></a> &raquo;</li>
        
      <li>mindspore.boost</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
          
            <a href="../_sources/api_python/mindspore.boost.rst.txt" rel="nofollow"> View page source</a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="mindspore-boost">
<h1>mindspore.boost<a class="headerlink" href="#mindspore-boost" title="Permalink to this headline">¶</a></h1>
<p>Boost能够自动加速网络，如减少BN/梯度冻结/累积梯度等。</p>
<p>注：此特性为测试版本，我们仍在改进其功能。</p>
<dl class="class">
<dt id="mindspore.boost.AdaSum">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">AdaSum</code><span class="sig-paren">(</span><em class="sig-param">rank</em>, <em class="sig-param">device_number</em>, <em class="sig-param">group_number</em>, <em class="sig-param">parameter_tuple</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.AdaSum" title="Permalink to this definition">¶</a></dt>
<dd><p>Adaptive Summation(AdaSum)是一种优化深度学习模型并行训练的算法，它可以提升不同规模集群训练的精度，减小不同规模集群调参难度。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>rank</strong> (int) – 总的训练的卡数。</p></li>
<li><p><strong>device_number</strong> (int) – 单机的卡数。</p></li>
<li><p><strong>group_number</strong> (int) – 分组的数量。</p></li>
<li><p><strong>parameter_tuple</strong> (Tuple(Parameter)) – 网络训练权重组成的元组。</p></li>
</ul>
<p><strong>输入：</strong></p>
<ul class="simple">
<li><p><strong>delta_weights</strong> (Tuple(Tensor)) – 梯度tuple。</p></li>
<li><p><strong>parameters</strong> (Tuple(Parameter)) – 当前权重组成的元组。</p></li>
<li><p><strong>old_parameters</strong> (Tuple(Parameter)) – 旧的权重组成的元组。</p></li>
</ul>
<p><strong>输出：</strong></p>
<p>Tuple(Tensor), adasum处理后更新的权重。</p>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.AutoBoost">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">AutoBoost</code><span class="sig-paren">(</span><em class="sig-param">level=&quot;O0&quot;</em>, <em class="sig-param">boost_config_dict=&quot;&quot;</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.AutoBoost" title="Permalink to this definition">¶</a></dt>
<dd><p>MindSpore自动优化算法库。</p>
<p><strong>参数：</strong></p>
<ul>
<li><p><strong>level</strong> (str) – Boost的配置级别。</p></li>
<li><p><strong>boost_config_dict</strong> (dict) – 用户可配置的超参字典，建议的格式如下：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="p">{</span>
    <span class="s2">&quot;boost&quot;</span><span class="p">:</span> <span class="p">{</span>
        <span class="s2">&quot;mode&quot;</span><span class="p">:</span> <span class="s2">&quot;auto&quot;</span><span class="p">,</span>
        <span class="s2">&quot;less_bn&quot;</span><span class="p">:</span> <span class="kc">False</span><span class="p">,</span>
        <span class="s2">&quot;grad_freeze&quot;</span><span class="p">:</span> <span class="kc">False</span><span class="p">,</span>
        <span class="s2">&quot;adasum&quot;</span><span class="p">:</span> <span class="kc">False</span><span class="p">,</span>
        <span class="s2">&quot;grad_accumulation&quot;</span><span class="p">:</span> <span class="kc">False</span><span class="p">,</span>
        <span class="s2">&quot;dim_reduce&quot;</span><span class="p">:</span> <span class="kc">False</span><span class="p">},</span>

    <span class="s2">&quot;common&quot;</span><span class="p">:</span> <span class="p">{</span>
        <span class="s2">&quot;gradient_split_groups&quot;</span><span class="p">:</span> <span class="p">[</span><span class="mi">50</span><span class="p">,</span> <span class="mi">100</span><span class="p">],</span>
        <span class="s2">&quot;device_number&quot;</span><span class="p">:</span> <span class="mi">8</span><span class="p">},</span>

    <span class="s2">&quot;less_bn&quot;</span><span class="p">:</span> <span class="p">{</span>
        <span class="s2">&quot;fn_flag&quot;</span><span class="p">:</span> <span class="kc">True</span><span class="p">,</span>
        <span class="s2">&quot;gc_flag&quot;</span><span class="p">:</span> <span class="kc">True</span><span class="p">},</span>

    <span class="s2">&quot;grad_freeze&quot;</span><span class="p">:</span> <span class="p">{</span>
        <span class="s2">&quot;param_groups&quot;</span><span class="p">:</span> <span class="mi">10</span><span class="p">,</span>
        <span class="s2">&quot;freeze_type&quot;</span><span class="p">:</span> <span class="mi">1</span><span class="p">,</span>
        <span class="s2">&quot;freeze_p&quot;</span><span class="p">:</span> <span class="mf">0.7</span><span class="p">,</span>
        <span class="s2">&quot;total_steps&quot;</span><span class="p">:</span> <span class="mi">65536</span><span class="p">},</span>

    <span class="s2">&quot;grad_accumulation&quot;</span><span class="p">:</span> <span class="p">{</span>
        <span class="s2">&quot;grad_accumulation_step&quot;</span><span class="p">:</span> <span class="mi">1</span><span class="p">},</span>

    <span class="s2">&quot;dim_reduce&quot;</span><span class="p">:</span> <span class="p">{</span>
        <span class="s2">&quot;rho&quot;</span><span class="p">:</span> <span class="mf">0.55</span><span class="p">,</span>
        <span class="s2">&quot;gamma&quot;</span><span class="p">:</span> <span class="mf">0.9</span><span class="p">,</span>
        <span class="s2">&quot;alpha&quot;</span><span class="p">:</span> <span class="mf">0.001</span><span class="p">,</span>
        <span class="s2">&quot;sigma&quot;</span><span class="p">:</span> <span class="mf">0.4</span><span class="p">,</span>
        <span class="s2">&quot;n_components&quot;</span><span class="p">:</span> <span class="mi">32</span><span class="p">,</span>
        <span class="s2">&quot;pca_mat_path&quot;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
        <span class="s2">&quot;weight_load_dir&quot;</span><span class="p">:</span> <span class="kc">None</span><span class="p">,</span>
        <span class="s2">&quot;timeout&quot;</span><span class="p">:</span> <span class="mi">1800</span><span class="p">}</span>

<span class="p">}</span>
</pre></div>
</div>
<ul>
<li><p>boost:</p>
<ul>
<li><p>mode (str): boost配置模式，支持 [“auto”, “manual”, “enable_all”, “disable_all”]。默认值: “auto”。</p>
<ul class="simple">
<li><p>auto: 自动配置，取决于Model类中的”boost_level”参数配置。</p></li>
<li><p>manual: 在”boost_config_dict”中人工配置。</p></li>
<li><p>enable_all: 开启所有boost算法。</p></li>
<li><p>disable_all: 关闭所有boost算法。</p></li>
</ul>
</li>
<li><p>less_bn (bool): 是否开启LessBN算法，默认: 不开启。</p></li>
<li><p>grad_freeze (bool): 是否开启梯度冻结算法，默认: 不开启。</p></li>
<li><p>adasum (bool): 是否开启自适应求和算法，默认: 不开启。</p></li>
<li><p>grad_accumulation (bool): 是否开启梯度累加算法，默认: 不开启。</p></li>
<li><p>dim_reduce (bool): 是否开启降维训练算法，默认: 不开启。</p>
<p>如果开启dim_reduce算法，其他算法会失效。
如果开启grad_freeze算法，同时关闭dim_reduce，其他算法会失效。</p>
</li>
</ul>
</li>
<li><p>common:</p>
<ul class="simple">
<li><p>gradient_split_groups (list): 网络的梯度分割点，默认: [50, 100]。</p></li>
<li><p>device_number (int): 设备数，默认: 8。</p></li>
</ul>
</li>
<li><p>less_bn:</p>
<ul class="simple">
<li><p>fn_flag (bool): 是否采用fn替换fc，默认: 替换。</p></li>
<li><p>gc_flag (bool): 是否启用gc，默认: 启用gc。</p></li>
</ul>
</li>
<li><p>grad_freeze:</p>
<ul class="simple">
<li><p>param_groups (int): 参数分组数量，默认值: 10。</p></li>
<li><p>freeze_type (int): 梯度冻结策略，参数选择[0, 1]，默认值: 1。</p></li>
<li><p>freeze_p (float): 梯度冻结概率，默认值: 0.7。</p></li>
<li><p>total_steps (int): 总训练步数，默认值: 65536。</p></li>
</ul>
</li>
<li><p>grad_accumulation:</p>
<ul class="simple">
<li><p>grad_accumulation_step (int): 累加梯度的步数，默认值: 1。</p></li>
</ul>
</li>
<li><p>dim_reduce:</p>
<p>dim_reduce主要原理：</p>
<div class="math notranslate nohighlight">
\[\begin{split}\begin{align}
grad\_k &amp;= pca\_mat \cdot grad\\
dk &amp;= - bk \cdot grad\_k\\
sk &amp;= rho ^ m \cdot dk\\
delta\_loss &amp;= sigma \cdot grad\_k.T \cdot sk
\end{align}\end{split}\]</div>
<p>其中:</p>
<ul class="simple">
<li><p>pca_mat (array): 维度(k*n)，k是n_components的大小，n是权重的大小。</p></li>
<li><p>bk (array): 维度(k*k)，bk是拟牛顿法中的对称正定矩阵。</p></li>
</ul>
<p>我们需要找到满足以下条件的m:</p>
<div class="math notranslate nohighlight">
\[new\_loss &lt; old\_loss + delta\_loss\]</div>
<p>然后使用delta_grad去更新模型的权重:</p>
<div class="math notranslate nohighlight">
\[\begin{split}\begin{align}
grad\_k\_proj &amp;= pca\_mat.T \cdot grad\_k\\
new\_grad\_momentum &amp;= gamma \cdot old\_grad\_momentum + grad - grad\_k\_proj\\
delta\_grad &amp;= alpha \cdot new\_grad\_momentum - pca\_mat.T \cdot sk
\end{align}\end{split}\]</div>
<ul class="simple">
<li><p>rho (float): 超参，一般无需调整，默认值: 0.55。</p></li>
<li><p>gamma (float): 超参，一般无需调整，默认值: 0.9。</p></li>
<li><p>alpha (float): 超参，一般无需调整，默认值: 0.001。</p></li>
<li><p>sigma (float): 超参，一般无需调整，默认值: 0.4。</p></li>
<li><p>n_components (int): PCA后的维度，默认值: 32。</p></li>
<li><p>pca_mat_path (str): PCA矩阵的加载路径，默认值: None。</p></li>
<li><p>weight_load_dir (str): 以checkpoint形式保存的权重加载路径，用于计算PCA矩阵，默认值: None。</p></li>
<li><p>timeout (int): 加载PCA矩阵的最长等待时间，默认值: 1800(s)。</p></li>
</ul>
</li>
</ul>
<p>用户可以通过加载JSON文件或者直接使用字典来配置boost_config_dict。
未配置的参数会使用默认值。</p>
</li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError</strong> – Boost的模式不在[“auto”, “manual”, “enable_all”, “disable_all”]这个列表中。</p></li>
</ul>
<p><strong>支持平台：</strong></p>
<p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code></p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.boost</span> <span class="kn">import</span> <span class="n">AutoBoost</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1">#1) when configuring the dict directly:</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">boost_config_dict</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;boost&quot;</span><span class="p">:</span> <span class="p">{</span><span class="s2">&quot;mode&quot;</span><span class="p">:</span> <span class="s2">&quot;auto&quot;</span><span class="p">}}</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">boost</span> <span class="o">=</span> <span class="n">AutoBoost</span><span class="p">(</span><span class="s2">&quot;O1&quot;</span><span class="p">,</span> <span class="n">boost_config_dict</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1">#2) when loading the dict from a json file:</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">json</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">boost_json</span> <span class="o">=</span> <span class="s2">&quot;/path/boost_config.json&quot;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">boost_json</span><span class="p">,</span> <span class="s1">&#39;r&#39;</span><span class="p">)</span> <span class="k">as</span> <span class="n">fp</span><span class="p">:</span>
<span class="gp">&gt;&gt;&gt; </span>    <span class="n">boost_config_dict</span> <span class="o">=</span> <span class="n">json</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">fp</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">boost</span> <span class="o">=</span> <span class="n">AutoBoost</span><span class="p">(</span><span class="s2">&quot;O1&quot;</span><span class="p">,</span> <span class="n">boost_config_dict</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.boost.AutoBoost.network_auto_process_train">
<code class="sig-name descname">network_auto_process_train</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">optimizer</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.AutoBoost.network_auto_process_train" title="Permalink to this definition">¶</a></dt>
<dd><p>使用Boost算法训练。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p>network (Cell)，训练网络。</p></li>
<li><p>optimizer (Union[Cell])，用于更新权重的优化器。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.AutoBoost.network_auto_process_eval">
<code class="sig-name descname">network_auto_process_eval</code><span class="sig-paren">(</span><em class="sig-param">network</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.AutoBoost.network_auto_process_eval" title="Permalink to this definition">¶</a></dt>
<dd><p>使用Boost算法推理。</p>
<p><strong>参数：</strong></p>
<p>network(Cell)，推理网络。</p>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.boost.BoostTrainOneStepCell">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">BoostTrainOneStepCell</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">optimizer</em>, <em class="sig-param">sens=1.0</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepCell" title="Permalink to this definition">¶</a></dt>
<dd><p>Boost网络训练封装类。</p>
<p>用优化器封装网络，使用输入训练网络来获取结果。反向图在*construct*函数中自动创建，并且支持多种不同的并行模式。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) – 训练网络，当前网络只支持单个输出。</p></li>
<li><p><strong>optimizer</strong> (Union[Cell]) – 用于更新网络参数的优化器。</p></li>
<li><p><strong>sens</strong> (numbers.Number) – 作为反向传播输入要填充的缩放数，默认值为1.0。</p></li>
</ul>
<p><strong>输入：</strong></p>
<ul class="simple">
<li><p><strong>(*inputs)</strong> (Tuple(Tensor)) – 网络的所有输入组成的元组。</p></li>
</ul>
<p><strong>输出：</strong></p>
<p>Tuple，包含三个Tensor，分别为损失函数值、溢出状态和当前损失缩放系数。</p>
<ul class="simple">
<li><p>loss(Tensor)，标量Tensor。</p></li>
<li><p>overflow(Tensor)，标量Tensor，类型为bool。</p></li>
<li><p>loss scaling value(Tensor)，标量Tensor。</p></li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>TypeError</strong> – 如果*sens*不是一个数字。</p></li>
</ul>
<p><strong>支持平台：</strong></p>
<p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code> <code class="docutils literal notranslate"><span class="pre">CPU</span></code></p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">boost</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_fn</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1">#1) Using the WithLossCell existing provide</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_net</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">WithLossCell</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">train_net</span> <span class="o">=</span> <span class="n">boost</span><span class="o">.</span><span class="n">BoostTrainOneStepCell</span><span class="p">(</span><span class="n">loss_net</span><span class="p">,</span> <span class="n">optim</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1">#2) Using user-defined WithLossCell</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">MyWithLossCell</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
<span class="gp">... </span>   <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">backbone</span><span class="p">,</span> <span class="n">loss_fn</span><span class="p">):</span>
<span class="gp">... </span>       <span class="nb">super</span><span class="p">(</span><span class="n">MyWithLossCell</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">auto_prefix</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="gp">... </span>       <span class="bp">self</span><span class="o">.</span><span class="n">_backbone</span> <span class="o">=</span> <span class="n">backbone</span>
<span class="gp">... </span>       <span class="bp">self</span><span class="o">.</span><span class="n">_loss_fn</span> <span class="o">=</span> <span class="n">loss_fn</span>
<span class="gp">...</span>
<span class="gp">... </span>   <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">label</span><span class="p">):</span>
<span class="gp">... </span>       <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_backbone</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="gp">... </span>       <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_loss_fn</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="n">label</span><span class="p">)</span>
<span class="gp">...</span>
<span class="gp">... </span>   <span class="nd">@property</span>
<span class="gp">... </span>   <span class="k">def</span> <span class="nf">backbone_network</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="gp">... </span>       <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_backbone</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_net</span> <span class="o">=</span> <span class="n">MyWithLossCell</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">train_net</span> <span class="o">=</span> <span class="n">boost</span><span class="o">.</span><span class="n">BoostTrainOneStepCell</span><span class="p">(</span><span class="n">loss_net</span><span class="p">,</span> <span class="n">optim</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.boost.BoostTrainOneStepCell.gradient_freeze_process">
<code class="sig-name descname">gradient_freeze_process</code><span class="sig-paren">(</span><em class="sig-param">*inputs</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepCell.gradient_freeze_process" title="Permalink to this definition">¶</a></dt>
<dd><p>使用梯度冻结算法训练。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>inputs</strong> (Tuple(Tensor)) – 网络训练的输入。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>number，网络训练过程中得到的loss值。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.BoostTrainOneStepCell.gradient_accumulation_process">
<code class="sig-name descname">gradient_accumulation_process</code><span class="sig-paren">(</span><em class="sig-param">loss</em>, <em class="sig-param">grads</em>, <em class="sig-param">sens</em>, <em class="sig-param">*inputs</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepCell.gradient_accumulation_process" title="Permalink to this definition">¶</a></dt>
<dd><p>使用梯度累积算法训练。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>loss</strong> (Tensor) – 网络训练的loss值。</p></li>
<li><p><strong>grads</strong> (Tuple(Tensor)) – 网络训练过程中的梯度。</p></li>
<li><p><strong>sens</strong> (Tensor) – 作为反向传播输入要填充的缩放数。</p></li>
<li><p><strong>inputs</strong> (Tuple(Tensor)) – 网络训练的输入。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>number，网络训练过程中得到的loss值。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.BoostTrainOneStepCell.adasum_process">
<code class="sig-name descname">adasum_process</code><span class="sig-paren">(</span><em class="sig-param">loss</em>, <em class="sig-param">grads</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepCell.adasum_process" title="Permalink to this definition">¶</a></dt>
<dd><p>使用Adasum算法训练。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>loss</strong> (Tensor) – 网络训练的loss值。</p></li>
<li><p><strong>grads</strong> (Tuple(Tensor)) – 网络训练过程中的梯度。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>number，网络训练过程中得到的loss值。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.BoostTrainOneStepCell.check_adasum_enable">
<code class="sig-name descname">check_adasum_enable</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepCell.check_adasum_enable" title="Permalink to this definition">¶</a></dt>
<dd><p>Adasum算法仅在多卡或者多机场景生效，并且要求卡数符合2的n次方，该函数用来判断adasum算法能否生效。</p>
<p><strong>返回：</strong></p>
<p>enable_adasum (bool)，Adasum算法是否生效。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.BoostTrainOneStepCell.check_dim_reduce_enable">
<code class="sig-name descname">check_dim_reduce_enable</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepCell.check_dim_reduce_enable" title="Permalink to this definition">¶</a></dt>
<dd><p>使用降维二阶训练算法训练。</p>
<p><strong>返回：</strong></p>
<p>enable_dim_reduce (bool)，降维二阶训练算法是否生效。</p>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.boost.BoostTrainOneStepWithLossScaleCell">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">BoostTrainOneStepWithLossScaleCell</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">optimizer</em>, <em class="sig-param">scale_sense</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.BoostTrainOneStepWithLossScaleCell" title="Permalink to this definition">¶</a></dt>
<dd><p>使用混合精度功能的Boost训练网络。</p>
<p>实现了包含损失缩放（loss scale）的单次训练。它使用网络、优化器和用于更新损失缩放系数（loss scale）的Cell(或一个Tensor)作为参数。可在host侧或device侧更新损失缩放系数。
如果需要在host侧更新，使用Tensor作为 <cite>scale_sense</cite> ，否则，使用可更新损失缩放系数的Cell实例作为 <cite>scale_sense</cite> 。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) – 训练网络，当前网络只支持单个输出。</p></li>
<li><p><strong>optimizer</strong> (Union[Cell]) – 用于更新网络参数的优化器。</p></li>
<li><p><strong>scale_sense</strong> (Union[Tensor, Cell]) - 如果此值为Cell类型，<cite>BoostTrainOneStepWithLossScaleCell</cite> 会调用它来更新损失缩放系数。如果此值为Tensor类型，可调用 <cite>set_sense_scale</cite> 来更新损失缩放系数，shape为 <span class="math notranslate nohighlight">\(()\)</span> 或 <span class="math notranslate nohighlight">\((1,)\)</span> 。</p></li>
</ul>
<p><strong>输入：</strong></p>
<ul class="simple">
<li><p><strong>(*inputs)</strong> (Tuple(Tensor)) – 网络的所有输入组成的元组。</p></li>
</ul>
<p><strong>输出：</strong></p>
<p>Tuple，包含三个Tensor，分别为损失函数值、溢出状态和当前损失缩放系数。</p>
<ul class="simple">
<li><p>loss(Tensor)，标量Tensor。</p></li>
<li><p>overflow(Tensor)，标量Tensor，类型为bool。</p></li>
<li><p>loss scaling value(Tensor)，标量Tensor。</p></li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>TypeError</strong> - <cite>scale_sense</cite> 既不是Cell，也不是Tensor。</p></li>
<li><p><strong>ValueError</strong> - <cite>scale_sense</cite> 的shape既不是(1,)也不是()。</p></li>
</ul>
<p><strong>支持平台：</strong></p>
<p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Tensor</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">,</span> <span class="n">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore.ops</span> <span class="k">as</span> <span class="nn">ops</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.nn</span> <span class="kn">import</span> <span class="n">WithLossCell</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">dtype</span> <span class="k">as</span> <span class="n">mstype</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">boost</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Cell</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">):</span>
<span class="gp">... </span>        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)),</span>
<span class="gp">... </span>                                <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span> <span class="o">=</span> <span class="n">ops</span><span class="o">.</span><span class="n">MatMul</span><span class="p">()</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="gp">... </span>        <span class="n">output</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
<span class="gp">... </span>        <span class="k">return</span> <span class="n">output</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">size</span><span class="p">,</span> <span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span> <span class="o">=</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">10</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1">#1) when the type of scale_sense is Cell:</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">(</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">MSELoss</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optimizer</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net_with_loss</span> <span class="o">=</span> <span class="n">WithLossCell</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">manager</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">DynamicLossScaleUpdateCell</span><span class="p">(</span><span class="n">loss_scale_value</span><span class="o">=</span><span class="mi">2</span><span class="o">**</span><span class="mi">12</span><span class="p">,</span> <span class="n">scale_factor</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">scale_window</span><span class="o">=</span><span class="mi">1000</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">train_network</span> <span class="o">=</span> <span class="n">boost</span><span class="o">.</span><span class="n">BoostTrainOneStepWithLossScaleCell</span><span class="p">(</span><span class="n">net_with_loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="p">,</span> <span class="n">scale_sense</span><span class="o">=</span><span class="n">manager</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">out_features</span><span class="p">,</span> <span class="n">in_features</span><span class="p">]),</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">labels</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">out_features</span><span class="p">,]),</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">train_network</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">labels</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1">#2) when the type of scale_sense is Tensor:</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">(</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">MSELoss</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optimizer</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net_with_loss</span> <span class="o">=</span> <span class="n">WithLossCell</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">inputs</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">size</span><span class="p">,</span> <span class="n">in_features</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">))</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">label</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">([</span><span class="n">size</span><span class="p">,</span> <span class="n">out_features</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">))</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">scaling_sens</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">full</span><span class="p">((</span><span class="mi">1</span><span class="p">),</span> <span class="n">np</span><span class="o">.</span><span class="n">finfo</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span><span class="o">.</span><span class="n">max</span><span class="p">),</span> <span class="n">dtype</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">train_network</span> <span class="o">=</span> <span class="n">boost</span><span class="o">.</span><span class="n">BoostTrainOneStepWithLossScaleCell</span><span class="p">(</span><span class="n">net_with_loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="p">,</span> <span class="n">scale_sense</span><span class="o">=</span><span class="n">scaling_sens</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">train_network</span><span class="p">(</span><span class="n">inputs</span><span class="p">,</span> <span class="n">label</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.DimReduce">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">DimReduce</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">optimizer</em>, <em class="sig-param">weight</em>, <em class="sig-param">pca_mat_local</em>, <em class="sig-param">n_components</em>, <em class="sig-param">rho</em>, <em class="sig-param">gamma</em>, <em class="sig-param">alpha</em>, <em class="sig-param">sigma</em>, <em class="sig-param">rank</em>, <em class="sig-param">rank_size</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.DimReduce" title="Permalink to this definition">¶</a></dt>
<dd><p>降维训练(dimension reduce training)是一种优化深度学习模型训练的算法，它可以加速模型的收敛。</p>
<p>算法主要原理：</p>
<div class="math notranslate nohighlight">
\[\begin{split}\begin{align}
grad\_k &amp;= pca\_mat \cdot grad\\
dk &amp;= - bk \cdot grad\_k\\
sk &amp;= rho ^ m \cdot dk\\
delta\_loss &amp;= sigma \cdot grad\_k.T \cdot sk
\end{align}\end{split}\]</div>
<p>其中:</p>
<ul class="simple">
<li><p>pca_mat (array): 维度(k*n)，k是n_components的大小，n是权重的大小。</p></li>
<li><p>bk (array): 维度(k*k)，bk是拟牛顿法中的对称正定矩阵。</p></li>
</ul>
<p>我们需要找到满足以下条件的m:</p>
<div class="math notranslate nohighlight">
\[new\_loss &lt; old\_loss + delta\_loss\]</div>
<p>然后使用delta_grad去更新模型的权重:</p>
<div class="math notranslate nohighlight">
\[\begin{split}\begin{align}
grad\_k\_proj &amp;= pca\_mat.T \cdot grad\_k\\
new\_grad\_momentum &amp;= gamma \cdot old\_grad\_momentum + grad - grad\_k\_proj\\
delta\_grad &amp;= alpha \cdot new\_grad\_momentum - pca\_mat.T \cdot sk
\end{align}\end{split}\]</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) - 训练网络，只支持单输出。</p></li>
<li><p><strong>optimizer</strong> (Union[Cell]) - 更新权重的优化器。</p></li>
<li><p><strong>weight</strong> (Tuple(Parameter)) - 网络权重组成的元组。</p></li>
<li><p><strong>pca_mat_local</strong> (numpy.ndarray) - 用于PCA操作的，经过切分的PCA转换矩阵，维度为k*n，k是切分的n_components的大小，n是权重的大小。</p></li>
<li><p><strong>n_components</strong> (int) - PCA的主成分维度(components)。</p></li>
<li><p><strong>rho</strong> (float) - 超参。</p></li>
<li><p><strong>gamma</strong> (float) - 超参。</p></li>
<li><p><strong>alpha</strong> (float) - 超参。</p></li>
<li><p><strong>sigma</strong> (float) - 超参。</p></li>
<li><p><strong>rank</strong> (int) - Rank编号。</p></li>
<li><p><strong>rank_size</strong> (int) - Rank总数。</p></li>
</ul>
<p><strong>输入：</strong></p>
<ul class="simple">
<li><p><strong>loss</strong> (Tensor) - 标量Tensor。</p></li>
<li><p><strong>old_grad</strong> (Tuple(Tensor)) - 网络权重的梯度组成的元组。</p></li>
<li><p><strong>weight</strong> (Tuple(Tensor)) - 网络权重组成的元组。</p></li>
<li><p><strong>weight_clone</strong> (Tuple(Tensor)) - 网络权重的副本。</p></li>
<li><p><strong>(*inputs)</strong> (Tuple(Tensor)) - 网络的所有输入组成的元组。</p></li>
</ul>
<p><strong>输出：</strong></p>
<ul class="simple">
<li><p><strong>loss</strong> (Tensor) - 标量Tensor。</p></li>
</ul>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.GradientFreeze">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">GradientFreeze</code><span class="sig-paren">(</span><em class="sig-param">param_groups</em>, <em class="sig-param">freeze_type</em>, <em class="sig-param">freeze_p</em>, <em class="sig-param">total_steps</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.GradientFreeze" title="Permalink to this definition">¶</a></dt>
<dd><p>梯度冻结算法，根据指定策略随机冻结某些层的梯度，来提升网络训练性能。
冻结的层数和冻结的概率均可由用户配置。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>param_groups</strong> (Union[tuple, list]) – 梯度冻结训练的权重。</p></li>
<li><p><strong>freeze_type</strong> (int) – 梯度冻结训练的策略。</p></li>
<li><p><strong>freeze_p</strong> (float) – 梯度冻结训练的概率。</p></li>
<li><p><strong>total_steps</strong> (numbers.Number) – 整个训练过程的总的步数。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">gradient_freeze_class</span> <span class="o">=</span> <span class="n">boost</span><span class="o">.</span><span class="n">GradientFreeze</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">,</span> <span class="mi">2000</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">network</span><span class="p">,</span> <span class="n">optimizer</span> <span class="o">=</span> <span class="n">gradient_freeze_class</span><span class="o">.</span><span class="n">freeze_generate</span><span class="p">(</span><span class="n">network</span><span class="p">,</span> <span class="n">optimizer</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.boost.GradientFreeze.generate_freeze_index_sequence">
<code class="sig-name descname">generate_freeze_index_sequence</code><span class="sig-paren">(</span><em class="sig-param">parameter_groups_number</em>, <em class="sig-param">freeze_strategy</em>, <em class="sig-param">freeze_p</em>, <em class="sig-param">total_steps</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.GradientFreeze.generate_freeze_index_sequence" title="Permalink to this definition">¶</a></dt>
<dd><p>生成梯度冻结每一步需要冻结的层数。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>parameter_groups_number</strong> (numbers.Number) – 梯度冻结训练的权重个数。</p></li>
<li><p><strong>freeze_strategy</strong> (int) – 梯度冻结训练的策略。</p></li>
<li><p><strong>freeze_p</strong> (float) – 梯度冻结训练的概率。</p></li>
<li><p><strong>total_steps</strong> (numbers.Number) – 整个训练过程的总的步数。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.GradientFreeze.split_parameters_groups">
<code class="sig-name descname">split_parameters_groups</code><span class="sig-paren">(</span><em class="sig-param">net</em>, <em class="sig-param">freeze_para_groups_number</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.GradientFreeze.split_parameters_groups" title="Permalink to this definition">¶</a></dt>
<dd><p>拆分用于梯度冻结训练的权重。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>net</strong> (Cell) – 训练网络。</p></li>
<li><p><strong>freeze_para_groups_number</strong> (numbers.Number) – 梯度冻结训练的权重个数。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.GradientFreeze.freeze_generate">
<code class="sig-name descname">freeze_generate</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">optimizer</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.GradientFreeze.freeze_generate" title="Permalink to this definition">¶</a></dt>
<dd><p>生成梯度冻结的网络与优化器。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) – 训练网络。</p></li>
<li><p><strong>optimizer</strong> (Union[Cell]) – 用于更新权重的优化器。</p></li>
</ul>
</dd></dl>

</dd></dl>

<dl class="function">
<dt id="freeze_cell">
<code class="sig-name descname">freeze_cell</code><span class="sig-paren">(</span><em class="sig-param">reducer_flag</em>, <em class="sig-param">network</em>, <em class="sig-param">optimizer</em>, <em class="sig-param">sens</em>, <em class="sig-param">grad</em>, <em class="sig-param">use_grad_accumulation</em>, <em class="sig-param">mean=None</em>, <em class="sig-param">degree=None</em>, <em class="sig-param">max_accumulation_step=1</em><span class="sig-paren">)</span><a class="headerlink" href="#freeze_cell" title="Permalink to this definition">¶</a></dt>
<dd><p>提供带梯度冻结的网络Cell。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>reducer_flag</strong> (bool) – 是否多卡训练的标志位。</p></li>
<li><p><strong>network</strong> (Cell) – 训练网络。</p></li>
<li><p><strong>optimizer</strong> (Cell) – 优化器。</p></li>
<li><p><strong>sens</strong> (numbers.Number) – 损失缩放系数。</p></li>
<li><p><strong>grad</strong> (tuple(Tensor)) – 网络梯度。</p></li>
<li><p><strong>use_grad_accumulation</strong> (bool) – 是否使用梯度累积。</p></li>
<li><p><strong>mean</strong> (bool) – 梯度是否求平均。默认值为None。</p></li>
<li><p><strong>degree</strong> (int) – device卡数。默认值为None。</p></li>
<li><p><strong>max_accumulation_step</strong> (int) – 梯度累积步数。默认值为1。</p></li>
</ul>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.FreezeOpt">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">FreezeOpt</code><span class="sig-paren">(</span><em class="sig-param">opt</em>, <em class="sig-param">train_parameter_groups=None</em>, <em class="sig-param">train_strategy=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.FreezeOpt" title="Permalink to this definition">¶</a></dt>
<dd><p>支持梯度冻结训练的优化器。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>opt</strong> (Cell) – 非冻结优化器实例，如 <em>Momentum</em>，<em>SGD</em>。</p></li>
<li><p><strong>train_parameter_groups</strong> (Union[tuple, list]) – 梯度冻结训练的权重。</p></li>
<li><p><strong>train_strategy</strong> (Union[tuple(int), list(int), Tensor]) – 梯度冻结训练的策略。</p></li>
</ul>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.GradientAccumulation">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">GradientAccumulation</code><span class="sig-paren">(</span><em class="sig-param">max_accumulation_step</em>, <em class="sig-param">optimizer</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.GradientAccumulation" title="Permalink to this definition">¶</a></dt>
<dd><p>梯度累积算法，在累积多个step的梯度之后，再用来更新网络权重，可以提高训练效率。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>max_accumulation_step</strong> (int) – 累积梯度的步数。</p></li>
<li><p><strong>optimizer</strong> (Cell) – 网络训练使用的优化器。</p></li>
</ul>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.LessBN">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">LessBN</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">fn_flag=False</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.LessBN" title="Permalink to this definition">¶</a></dt>
<dd><p>LessBN算法，可以在不损失网络精度的前提下，自动减少网络中批归一化（Batch Normalization）的数量，来提升网络性能。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) – 待训练的网络模型。</p></li>
<li><p><strong>fn_flag</strong> (bool) – 是否将网络中最后一个全连接层替换为全归一化层。默认值：False。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">network</span> <span class="o">=</span> <span class="n">boost</span><span class="o">.</span><span class="n">LessBN</span><span class="p">(</span><span class="n">network</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="class">
<dt id="mindspore.boost.OptimizerProcess">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">OptimizerProcess</code><span class="sig-paren">(</span><em class="sig-param">opt</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.OptimizerProcess" title="Permalink to this definition">¶</a></dt>
<dd><p>处理Boost的优化器，目前支持给优化器添加梯度中心化和创建新的优化器。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>opt</strong> (Cell) – 使用的优化器。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Tensor</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">,</span> <span class="n">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">ops</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.boost</span> <span class="kn">import</span> <span class="n">OptimizerProcess</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Cell</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">):</span>
<span class="gp">... </span>        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)),</span>
<span class="gp">... </span>                                <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span> <span class="o">=</span> <span class="n">ops</span><span class="o">.</span><span class="n">MatMul</span><span class="p">()</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="gp">... </span>        <span class="n">output</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
<span class="gp">... </span>        <span class="k">return</span> <span class="n">output</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">size</span><span class="p">,</span> <span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span> <span class="o">=</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">10</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">network</span> <span class="o">=</span> <span class="n">Net</span><span class="p">(</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optimizer</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optimizer_process</span> <span class="o">=</span> <span class="n">OptimizerProcess</span><span class="p">(</span><span class="n">optimizer</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optimizer_process</span><span class="o">.</span><span class="n">add_grad_centralization</span><span class="p">(</span><span class="n">network</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optimizer</span> <span class="o">=</span> <span class="n">optimizer_process</span><span class="o">.</span><span class="n">generate_new_optimizer</span><span class="p">()</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.boost.OptimizerProcess.add_grad_centralization">
<code class="sig-name descname">add_grad_centralization</code><span class="sig-paren">(</span><em class="sig-param">network</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.OptimizerProcess.add_grad_centralization" title="Permalink to this definition">¶</a></dt>
<dd><p>添加梯度中心化。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) – 训练网络。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.OptimizerProcess.build_params_dict">
<code class="sig-name descname">build_params_dict</code><span class="sig-paren">(</span><em class="sig-param">network</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.OptimizerProcess.build_params_dict" title="Permalink to this definition">¶</a></dt>
<dd><p>构建网络权重的dict。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>network</strong> (Cell) – 训练网络。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.OptimizerProcess.build_gc_params_group">
<code class="sig-name descname">build_gc_params_group</code><span class="sig-paren">(</span><em class="sig-param">params_dict</em>, <em class="sig-param">parameters</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.OptimizerProcess.build_gc_params_group" title="Permalink to this definition">¶</a></dt>
<dd><p>构建带梯度中心化的网络权重分组。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>params_dict</strong> (dict) – 训练权重的字典。</p></li>
<li><p><strong>parameters</strong> (list) – 训练权重的列表。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.OptimizerProcess.generate_new_optimizer">
<code class="sig-name descname">generate_new_optimizer</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.OptimizerProcess.generate_new_optimizer" title="Permalink to this definition">¶</a></dt>
<dd><p>生成新的优化器。</p>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.boost.ParameterProcess">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.boost.</code><code class="sig-name descname">ParameterProcess</code><a class="headerlink" href="#mindspore.boost.ParameterProcess" title="Permalink to this definition">¶</a></dt>
<dd><p>处理Boost网络的权重。当前支持创建分组参数和自动设置网络梯度切分点。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Tensor</span><span class="p">,</span> <span class="n">Parameter</span><span class="p">,</span> <span class="n">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore.ops</span> <span class="k">as</span> <span class="nn">ops</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.boost</span> <span class="kn">import</span> <span class="n">OptimizerProcess</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Cell</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">):</span>
<span class="gp">... </span>        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)),</span>
<span class="gp">... </span>                                <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">weight2</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">ones</span><span class="p">([</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)),</span>
<span class="gp">... </span>                                <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight2&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span> <span class="o">=</span> <span class="n">ops</span><span class="o">.</span><span class="n">MatMul</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">matmul2</span> <span class="o">=</span> <span class="n">ops</span><span class="o">.</span><span class="n">MatMul</span><span class="p">()</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="gp">... </span>        <span class="n">output</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
<span class="gp">... </span>        <span class="n">output2</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">matmul2</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight2</span><span class="p">)</span>
<span class="gp">... </span>        <span class="k">return</span> <span class="n">output</span> <span class="o">+</span> <span class="n">output2</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">size</span><span class="p">,</span> <span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span> <span class="o">=</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">10</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">network</span> <span class="o">=</span> <span class="n">Net</span><span class="p">(</span><span class="n">in_features</span><span class="p">,</span> <span class="n">out_features</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">new_parameter</span> <span class="o">=</span> <span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">()[:</span><span class="mi">1</span><span class="p">]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">parameter_process</span> <span class="o">=</span> <span class="n">ParameterProcess</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">group_params</span> <span class="o">=</span> <span class="n">parameter_process</span><span class="o">.</span><span class="n">generate_group_params</span><span class="p">(</span><span class="n">new_parameter</span><span class="p">,</span> <span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">())</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.boost.ParameterProcess.assign_parameter_group">
<code class="sig-name descname">assign_parameter_group</code><span class="sig-paren">(</span><em class="sig-param">parameters</em>, <em class="sig-param">split_point=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.ParameterProcess.assign_parameter_group" title="Permalink to this definition">¶</a></dt>
<dd><p>设置分组权重。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>parameters</strong> (list) – 训练网络的权重。</p></li>
<li><p><strong>split_point</strong> (list) – 网络梯度切分点。默认为None。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.boost.ParameterProcess.generate_group_params">
<code class="sig-name descname">generate_group_params</code><span class="sig-paren">(</span><em class="sig-param">parameters</em>, <em class="sig-param">origin_params</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.boost.ParameterProcess.generate_group_params" title="Permalink to this definition">¶</a></dt>
<dd><p>创建分组权重。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>parameters</strong> (list) – 训练网络的新权重。</p></li>
<li><p><strong>origin_params</strong> (list) – 训练网络的初始权重。</p></li>
</ul>
</dd></dl>

</dd></dl>

</div>


           </div>
           
          </div>
          <footer>
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
        <a href="mindspore.train.html" class="btn btn-neutral float-left" title="mindspore.train" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
    </div>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>