

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" />
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  
  <title>mindspore.train &mdash; MindSpore master documentation</title>
  

  
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />

  
  

  
  

  

  
  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/language_data.js"></script>
        <script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    
    <link rel="index" title="Index" href="../genindex.html" />
    <link rel="search" title="Search" href="../search.html" />
    <link rel="next" title="mindspore.boost" href="mindspore.boost.html" />
    <link rel="prev" title="mindspore.scipy.sparse.linalg.gmres" href="scipy/mindspore.scipy.sparse.linalg.gmres.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html" class="icon icon-home"> MindSpore
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">mindspore.train</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#mindspore-train-summary">mindspore.train.summary</a></li>
<li class="toctree-l2"><a class="reference internal" href="#mindspore-train-callback">mindspore.train.callback</a></li>
<li class="toctree-l2"><a class="reference internal" href="#mindspore-train-train-thor">mindspore.train.train_thor</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="mindspore.boost.html">mindspore.boost</a></li>
</ul>
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html" class="icon icon-home"></a> &raquo;</li>
        
      <li>mindspore.train</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
          
            <a href="../_sources/api_python/mindspore.train.rst.txt" rel="nofollow"> View page source</a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="mindspore-train">
<h1>mindspore.train<a class="headerlink" href="#mindspore-train" title="Permalink to this headline">¶</a></h1>
<div class="section" id="mindspore-train-summary">
<h2>mindspore.train.summary<a class="headerlink" href="#mindspore-train-summary" title="Permalink to this headline">¶</a></h2>
<p>使用SummaryRecord将需要的数据存储为summary文件和lineage文件，使用方法包括自定义回调函数和自定义训练循环。保存的summary文件使用MindInsight进行可视化分析。</p>
<dl class="class">
<dt id="mindspore.train.summary.SummaryRecord">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.summary.</code><code class="sig-name descname">SummaryRecord</code><span class="sig-paren">(</span><em class="sig-param">log_dir</em>, <em class="sig-param">file_prefix='events'</em>, <em class="sig-param">file_suffix='_MS'</em>, <em class="sig-param">network=None</em>, <em class="sig-param">max_file_size=None</em>, <em class="sig-param">raise_exception=False</em>, <em class="sig-param">export_options=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.summary.SummaryRecord" title="Permalink to this definition">¶</a></dt>
<dd><p>SummaryRecord用于记录summary数据和lineage数据。</p>
<p>该方法将在一个指定的目录中创建summary文件和lineage文件，并将数据写入文件。</p>
<p>它通过执行 <cite>record</cite> 方法将数据写入文件。除了通过 <a class="reference external" href="https://www.mindspore.cn/mindinsight/docs/zh-CN/master/summary_record.html#summarysummarycollector">summary算子</a> 记录网络的数据外，SummaryRecord还支持通过 <a class="reference external" href="https://www.mindspore.cn/mindinsight/docs/zh-CN/master/summary_record.html#callback">自定义回调函数和自定义训练循环</a> 记录数据。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<ul class="simple">
<li><p>确保在最后关闭SummaryRecord，否则进程不会退出。请参阅下面的示例部分，了解如何用两种方式正确关闭SummaryRecord。</p></li>
<li><p>每次训练只允许创建一个SummaryRecord实例，否则会导致数据写入异常。</p></li>
<li><p>SummaryRecord仅支持Linux系统。</p></li>
<li><p>编译MindSpore时，设置 <cite>-s on</cite> 关闭维测功能后，SummaryRecord不可用。</p></li>
</ul>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>log_dir</strong> (str) - <cite>log_dir</cite> 是用来保存summary文件的目录。</p></li>
<li><p><strong>file_prefix</strong> (str) - 文件的前缀。默认值：<cite>events</cite> 。</p></li>
<li><p><strong>file_suffix</strong> (str) - 文件的后缀。默认值：<cite>_MS</cite> 。</p></li>
<li><p><strong>network</strong> (Cell) - 表示用于保存计算图的网络。默认值：None。</p></li>
<li><p><strong>max_file_size</strong> (int, 可选) - 可写入磁盘的每个文件的最大大小（以字节为单位）。例如，预期写入文件最大不超过4GB，则设置 <cite>max_file_size=4*1024**3</cite> 。默认值：None，表示无限制。</p></li>
<li><p><strong>raise_exception</strong> (bool, 可选) - 设置在记录数据中发生RuntimeError或OSError异常时是否抛出异常。默认值：False，表示打印错误日志，不抛出异常。</p></li>
<li><p><strong>export_options</strong> (Union[None, dict]) - 可以将保存在summary中的数据导出，并使用字典自定义所需的数据和文件格式。注：导出的文件大小不受 <cite>max_file_size</cite> 的限制。例如，您可以设置{'tensor_format': 'npy'}将Tensor导出为 <cite>npy</cite> 文件。支持导出的数据类型如下所示。默认值：None，表示不导出数据。</p>
<ul>
<li><p><strong>tensor_format</strong> (Union[str, None]) - 自定义导出的Tensor的格式。支持[“npy”, None]。默认值：None，表示不导出Tensor。</p>
<ul>
<li><p><strong>npy</strong>：将Tensor导出为NPY文件。</p></li>
</ul>
</li>
</ul>
</li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>TypeError：</strong> <cite>max_file_size</cite> 不是整型，或 <cite>file_prefix</cite> 和 <cite>file_suffix</cite> 不是字符串。</p></li>
<li><p><strong>ValueError：</strong> 编译MindSpore时，设置 <cite>-s on</cite> 关闭了维测功能。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="c1"># use in with statement to auto close</span>
<span class="gp">... </span>    <span class="k">with</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">summary_record</span><span class="p">:</span>
<span class="gp">... </span>        <span class="k">pass</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="c1"># use in try .. finally .. to ensure closing</span>
<span class="gp">... </span>    <span class="k">try</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span> <span class="o">=</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="k">finally</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.train.summary.SummaryRecord.add_value">
<code class="sig-name descname">add_value</code><span class="sig-paren">(</span><em class="sig-param">plugin</em>, <em class="sig-param">name</em>, <em class="sig-param">value</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.summary.SummaryRecord.add_value" title="Permalink to this definition">¶</a></dt>
<dd><p>添加需要记录的值。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>plugin</strong> (str) - 数据类型标签。</p>
<ul>
<li><p>graph：代表添加的数据为计算图。</p></li>
<li><p>scalar：代表添加的数据为标量。</p></li>
<li><p>image：代表添加的数据为图片。</p></li>
<li><p>tensor：代表添加的数据为张量。</p></li>
<li><p>histogram：代表添加的数据为直方图。</p></li>
<li><p>train_lineage：代表添加的数据为训练阶段的lineage数据。</p></li>
<li><p>eval_lineage：代表添加的数据为评估阶段的lineage数据。</p></li>
<li><p>dataset_graph：代表添加的数据为数据图。</p></li>
<li><p>custom_lineage_data：代表添加的数据为自定义lineage数据。</p></li>
<li><p>LANDSCAPE：代表添加的数据为地形图。</p></li>
</ul>
</li>
<li><p><strong>name</strong> (str) - 数据名称。</p></li>
<li><p><strong>value</strong> (Union[Tensor, GraphProto, TrainLineage, EvaluationLineage, DatasetGraph, UserDefinedInfo, LossLandscape]) - 待存储的值。</p>
<ul>
<li><p>当plugin为"graph"时，参数值的数据类型应为"GraphProto"对象。具体详情，请参见 mindspore/ccsrc/anf_ir.proto。</p></li>
<li><p>当plugin为"scalar"、"image"、"tensor"或"histogram"时，参数值的数据类型应为"Tensor"对象。</p></li>
<li><p>当plugin为"train_lineage"时，参数值的数据类型应为"TrainLineage"对象。具体详情，请参见 mindspore/ccsrc/lineage.proto。</p></li>
<li><p>当plugin为"eval_lineage"时，参数值的数据类型应为"EvaluationLineage"对象。具体详情，请参见 mindspore/ccsrc/lineage.proto。</p></li>
<li><p>当plugin为"dataset_graph"时，参数值的数据类型应为"DatasetGraph"对象。具体详情，请参见 mindspore/ccsrc/lineage.proto。</p></li>
<li><p>当plugin为"custom_lineage_data"时，参数值的数据类型应为"UserDefinedInfo"对象。具体详情，请参见 mindspore/ccsrc/lineage.proto。</p></li>
<li><p>当plugin为"LANDSCAPE"时，参数值的数据类型应为"LossLandscape"对象。具体详情，请参见 mindspore/ccsrc/summary.proto。</p></li>
</ul>
</li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError：</strong> <cite>plugin</cite> 的值不在可选值内。</p></li>
<li><p><strong>TypeError：</strong> <cite>name</cite> 不是非空字符串，或当 <cite>plugin</cite> 为"scalar"、"image"、"tensor"或"histogram"时，<cite>value</cite> 的数据类型不是"Tensor"对象。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Tensor</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="k">with</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">,</span> <span class="n">file_prefix</span><span class="o">=</span><span class="s2">&quot;xx_&quot;</span><span class="p">,</span> <span class="n">file_suffix</span><span class="o">=</span><span class="s2">&quot;_yy&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">summary_record</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span><span class="o">.</span><span class="n">add_value</span><span class="p">(</span><span class="s1">&#39;scalar&#39;</span><span class="p">,</span> <span class="s1">&#39;loss&#39;</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">(</span><span class="mf">0.1</span><span class="p">))</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.summary.SummaryRecord.close">
<code class="sig-name descname">close</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.summary.SummaryRecord.close" title="Permalink to this definition">¶</a></dt>
<dd><p>将缓冲区中的数据立刻写入文件并关闭SummaryRecord。请使用with语句或try…finally语句进行自动关闭。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="k">try</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span> <span class="o">=</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="k">finally</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.summary.SummaryRecord.flush">
<code class="sig-name descname">flush</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.summary.SummaryRecord.flush" title="Permalink to this definition">¶</a></dt>
<dd><p>刷新缓冲区，将缓冲区中的数据立刻写入文件。</p>
<p>调用该函数以确保所有挂起事件都已写入到磁盘。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="k">with</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">,</span> <span class="n">file_prefix</span><span class="o">=</span><span class="s2">&quot;xx_&quot;</span><span class="p">,</span> <span class="n">file_suffix</span><span class="o">=</span><span class="s2">&quot;_yy&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">summary_record</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span><span class="o">.</span><span class="n">flush</span><span class="p">()</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.summary.SummaryRecord.log_dir">
<em class="property">property </em><code class="sig-name descname">log_dir</code><a class="headerlink" href="#mindspore.train.summary.SummaryRecord.log_dir" title="Permalink to this definition">¶</a></dt>
<dd><p>获取日志文件的完整路径。</p>
<p><strong>返回：</strong></p>
<p>str，日志文件的完整路径。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="k">with</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">,</span> <span class="n">file_prefix</span><span class="o">=</span><span class="s2">&quot;xx_&quot;</span><span class="p">,</span> <span class="n">file_suffix</span><span class="o">=</span><span class="s2">&quot;_yy&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">summary_record</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">log_dir</span> <span class="o">=</span> <span class="n">summary_record</span><span class="o">.</span><span class="n">log_dir</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.summary.SummaryRecord.record">
<code class="sig-name descname">record</code><span class="sig-paren">(</span><em class="sig-param">step</em>, <em class="sig-param">train_network=None</em>, <em class="sig-param">plugin_filter=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.summary.SummaryRecord.record" title="Permalink to this definition">¶</a></dt>
<dd><p>记录summary。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>step</strong> (int) - 表示当前的step。</p></li>
<li><p><strong>train_network</strong> (Cell) - 表示用于保存计算图的训练网络。默认值：None，表示当原始网络的图为None时，不保存计算图。</p></li>
<li><p><strong>plugin_filter</strong> (Callable[[str], bool]) - 过滤器函数，用于过滤需要写入的标签项。默认值：None。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>bool，表示记录是否成功。</p>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>TypeError：</strong> <cite>step</cite> 不为整型，或 <cite>train_network</cite> 的类型不为 <a class="reference external" href="https://www.mindspore.cn/docs/api/zh-CN/master/api_python/nn/mindspore.nn.Cell.html?highlight=MindSpore.nn.cell#mindspore-nn-cell">mindspore.nn.Cell</a> 。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="k">with</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">,</span> <span class="n">file_prefix</span><span class="o">=</span><span class="s2">&quot;xx_&quot;</span><span class="p">,</span> <span class="n">file_suffix</span><span class="o">=</span><span class="s2">&quot;_yy&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">summary_record</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span><span class="o">.</span><span class="n">record</span><span class="p">(</span><span class="n">step</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">...</span>
<span class="go">True</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.summary.SummaryRecord.set_mode">
<code class="sig-name descname">set_mode</code><span class="sig-paren">(</span><em class="sig-param">mode</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.summary.SummaryRecord.set_mode" title="Permalink to this definition">¶</a></dt>
<dd><p>设置模型运行阶段。不同的阶段会影响记录数据的内容。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>mode</strong> (str) - 待设置的网络阶段，可选值为"train"或"eval"。</p>
<ul>
<li><p>train：代表训练阶段。</p></li>
<li><p>eval：代表评估阶段，此时 <cite>summary_record</cite> 不会记录summary算子的数据。</p></li>
</ul>
</li>
</ul>
<p><strong>异常：</strong></p>
<p><strong>ValueError：</strong> <cite>mode</cite> 的值不在可选值内。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.summary</span> <span class="kn">import</span> <span class="n">SummaryRecord</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="k">with</span> <span class="n">SummaryRecord</span><span class="p">(</span><span class="n">log_dir</span><span class="o">=</span><span class="s2">&quot;./summary_dir&quot;</span><span class="p">,</span> <span class="n">file_prefix</span><span class="o">=</span><span class="s2">&quot;xx_&quot;</span><span class="p">,</span> <span class="n">file_suffix</span><span class="o">=</span><span class="s2">&quot;_yy&quot;</span><span class="p">)</span> <span class="k">as</span> <span class="n">summary_record</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">summary_record</span><span class="o">.</span><span class="n">set_mode</span><span class="p">(</span><span class="s1">&#39;eval&#39;</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="mindspore-train-callback">
<h2>mindspore.train.callback<a class="headerlink" href="#mindspore-train-callback" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="mindspore.train.callback.Callback">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">Callback</code><a class="headerlink" href="#mindspore.train.callback.Callback" title="Permalink to this definition">¶</a></dt>
<dd><p>用于构建Callback函数的基类。Callback函数是一个上下文管理器，在运行模型时被调用。
可以使用此机制进行一些自定义操作。</p>
<p>Callback函数可以在step或epoch开始前或结束后执行一些操作。
要创建自定义Callback，需要继承Callback基类并重载它相应的方法，有关自定义Callback的详细信息，请查看
<a class="reference external" href="https://www.mindspore.cn/docs/programming_guide/zh-CN/master/custom_debugging_info.html">Callback</a>。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.callback</span> <span class="kn">import</span> <span class="n">Callback</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Print_info</span><span class="p">(</span><span class="n">Callback</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">step_end</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">run_context</span><span class="p">):</span>
<span class="gp">... </span>        <span class="n">cb_params</span> <span class="o">=</span> <span class="n">run_context</span><span class="o">.</span><span class="n">original_args</span><span class="p">()</span>
<span class="gp">... </span>        <span class="nb">print</span><span class="p">(</span><span class="s2">&quot;step_num: &quot;</span><span class="p">,</span> <span class="n">cb_params</span><span class="o">.</span><span class="n">cur_step_num</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">print_cb</span> <span class="o">=</span> <span class="n">Print_info</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">sparse</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="mf">0.01</span><span class="p">,</span> <span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">dataset</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="n">print_cb</span><span class="p">)</span>
<span class="go">step_num: 1</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.train.callback.Callback.begin">
<code class="sig-name descname">begin</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.Callback.begin" title="Permalink to this definition">¶</a></dt>
<dd><p>在网络执行之前被调用一次。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.Callback.end">
<code class="sig-name descname">end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.Callback.end" title="Permalink to this definition">¶</a></dt>
<dd><p>网络执行后被调用一次。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.Callback.epoch_begin">
<code class="sig-name descname">epoch_begin</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.Callback.epoch_begin" title="Permalink to this definition">¶</a></dt>
<dd><p>在每个epoch开始之前被调用。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.Callback.epoch_end">
<code class="sig-name descname">epoch_end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.Callback.epoch_end" title="Permalink to this definition">¶</a></dt>
<dd><p>在每个epoch结束后被调用。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.Callback.step_begin">
<code class="sig-name descname">step_begin</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.Callback.step_begin" title="Permalink to this definition">¶</a></dt>
<dd><p>在每个step开始之前被调用。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.Callback.step_end">
<code class="sig-name descname">step_end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.Callback.step_end" title="Permalink to this definition">¶</a></dt>
<dd><p>在每个step完成后被调用。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.LossMonitor">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">LossMonitor</code><span class="sig-paren">(</span><em class="sig-param">per_print_times=1</em>, <em class="sig-param">has_trained_epoch=0</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.LossMonitor" title="Permalink to this definition">¶</a></dt>
<dd><p>监控训练的loss。</p>
<p>如果loss是NAN或INF，则终止训练。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>如果 <cite>per_print_times</cite> 为0，则不打印loss。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>per_print_times</strong> (int) - 表示每隔多少个step打印一次loss。默认值：1。</p></li>
<li><p><strong>has_trained_epoch</strong> (int) - 表示已经训练了多少个epoch，如果设置了该参数，LossMonitor将监控该数值之后epoch的loss值。默认值：0。</p></li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError</strong> - 当 <cite>per_print_times</cite> 不是整数或小于零。</p></li>
<li><p><strong>ValueError</strong> - 当 <cite>has_trained_epoch</cite> 不是整数或小于零。</p></li>
</ul>
<dl class="method">
<dt id="mindspore.train.callback.LossMonitor.step_end">
<code class="sig-name descname">step_end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.LossMonitor.step_end" title="Permalink to this definition">¶</a></dt>
<dd><p>step结束时打印训练loss。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的相关信息。</p></li>
</ul>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.TimeMonitor">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">TimeMonitor</code><span class="sig-paren">(</span><em class="sig-param">data_size=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.TimeMonitor" title="Permalink to this definition">¶</a></dt>
<dd><p>监控训练时间。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>data_size</strong> (int) - 表示每隔多少个step打印一次信息。如果程序在训练期间获取到Model的 <cite>batch_num</cite> ，则将把 <cite>data_size</cite> 设为 <cite>batch_num</cite> ，否则将使用 <cite>data_size</cite> 。默认值：None。</p></li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError</strong> - <cite>data_size</cite> 不是正整数。</p></li>
</ul>
<dl class="method">
<dt id="mindspore.train.callback.TimeMonitor.epoch_begin">
<code class="sig-name descname">epoch_begin</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.TimeMonitor.epoch_begin" title="Permalink to this definition">¶</a></dt>
<dd><p>在epoch开始时记录时间。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的相关信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.TimeMonitor.epoch_end">
<code class="sig-name descname">epoch_end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.TimeMonitor.epoch_end" title="Permalink to this definition">¶</a></dt>
<dd><p>在epoch结束时打印epoch的耗时。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的相关信息。</p></li>
</ul>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.ModelCheckpoint">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">ModelCheckpoint</code><span class="sig-paren">(</span><em class="sig-param">prefix='CKP'</em>, <em class="sig-param">directory=None</em>, <em class="sig-param">config=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.ModelCheckpoint" title="Permalink to this definition">¶</a></dt>
<dd><p>checkpoint的回调函数。</p>
<p>在训练过程中调用该方法可以保存网络参数。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>在分布式训练场景下，请为每个训练进程指定不同的目录来保存checkpoint文件。否则，可能会训练失败。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>prefix</strong> (str) - checkpoint文件的前缀名称。默认值：CKP。</p></li>
<li><p><strong>directory</strong> (str) - 保存checkpoint文件的文件夹路径。默认情况下，文件保存在当前目录下。默认值：None。</p></li>
<li><p><strong>config</strong> (CheckpointConfig) - checkpoint策略配置。默认值：None。</p></li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError</strong> - 如果prefix参数不是str类型或包含’/’字符。</p></li>
<li><p><strong>ValueError</strong> - 如果directory参数不是str类型。</p></li>
<li><p><strong>TypeError</strong> - config不是CheckpointConfig类型。</p></li>
</ul>
<dl class="method">
<dt id="mindspore.train.callback.ModelCheckpoint.end">
<code class="sig-name descname">end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.ModelCheckpoint.end" title="Permalink to this definition">¶</a></dt>
<dd><p>在训练结束后，会保存最后一个step的checkpoint。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.ModelCheckpoint.latest_ckpt_file_name">
<em class="property">property </em><code class="sig-name descname">latest_ckpt_file_name</code><a class="headerlink" href="#mindspore.train.callback.ModelCheckpoint.latest_ckpt_file_name" title="Permalink to this definition">¶</a></dt>
<dd><p>返回最新的checkpoint路径和文件名。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.ModelCheckpoint.step_end">
<code class="sig-name descname">step_end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.ModelCheckpoint.step_end" title="Permalink to this definition">¶</a></dt>
<dd><p>在step结束时保存checkpoint。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.SummaryCollector">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">SummaryCollector</code><span class="sig-paren">(</span><em class="sig-param">summary_dir</em>, <em class="sig-param">collect_freq=10</em>, <em class="sig-param">collect_specified_data=None</em>, <em class="sig-param">keep_default_action=True</em>, <em class="sig-param">custom_lineage_data=None</em>, <em class="sig-param">collect_tensor_freq=None</em>, <em class="sig-param">max_file_size=None</em>, <em class="sig-param">export_options=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.SummaryCollector" title="Permalink to this definition">¶</a></dt>
<dd><p>SummaryCollector可以收集一些常用信息。</p>
<p>它可以帮助收集loss、学习率、计算图等。
SummaryCollector还允许通过 <a class="reference external" href="https://www.mindspore.cn/mindinsight/docs/zh-CN/master/summary_record.html#summarysummarycollector">summary算子</a> 将数据收集到summary文件中。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<ul class="simple">
<li><p>不允许在回调列表中存在多个SummaryCollector实例。</p></li>
<li><p>并非所有信息都可以在训练阶段或评估阶段收集。</p></li>
<li><p>SummaryCollector始终记录summary算子收集的数据。</p></li>
<li><p>SummaryCollector仅支持Linux系统。</p></li>
<li><p>编译MindSpore时，设置 <cite>-s on</cite> 关闭维测功能后，SummaryCollector不可用。</p></li>
</ul>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>summary_dir</strong> (str) - 收集的数据将存储到此目录。如果目录不存在，将自动创建。</p></li>
<li><p><strong>collect_freq</strong> (int) - 设置数据收集的频率，频率应大于零，单位为 <cite>step</cite> 。如果设置了频率，将在(current steps % freq)=0时收集数据，并且将总是收集第一个step。需要注意的是，如果使用数据下沉模式，单位将变成 <cite>epoch</cite> 。不建议过于频繁地收集数据，因为这可能会影响性能。默认值：10。</p></li>
<li><p><strong>collect_specified_data</strong> (Union[None, dict]) - 对收集的数据进行自定义操作。您可以使用字典自定义需要收集的数据类型。例如，您可以设置{‘collect_metric’:False}不去收集metrics。支持控制的数据如下。默认值：None，收集所有数据。</p>
<ul>
<li><p><strong>collect_metric</strong> (bool) - 表示是否收集训练metrics，目前只收集loss。把第一个输出视为loss，并且算出其平均数。默认值：True。</p></li>
<li><p><strong>collect_graph</strong> (bool) - 表示是否收集计算图。目前只收集训练计算图。默认值：True。</p></li>
<li><p><strong>collect_train_lineage</strong> (bool) - 表示是否收集训练阶段的lineage数据，该字段将显示在MindInsight的 <a class="reference external" href="https://www.mindspore.cn/mindinsight/docs/zh-CN/master/lineage_and_scalars_comparison.html">lineage页面</a> 上。默认值：True。</p></li>
<li><p><strong>collect_eval_lineage</strong> (bool) - 表示是否收集评估阶段的lineage数据，该字段将显示在MindInsight的lineage页面上。默认值：True。</p></li>
<li><p><strong>collect_input_data</strong> (bool) - 表示是否为每次训练收集数据集。目前仅支持图像数据。如果数据集中有多列数据，则第一列应为图像数据。默认值：True。</p></li>
<li><p><strong>collect_dataset_graph</strong> (bool) - 表示是否收集训练阶段的数据集图。默认值：True。</p></li>
<li><p><strong>histogram_regular</strong> (Union[str, None]) - 收集参数分布页面的权重和偏置，并在MindInsight中展示。此字段允许正则表达式控制要收集的参数。不建议一次收集太多参数，因为这会影响性能。注：如果收集的参数太多并且内存不足，训练将会失败。默认值：None，表示只收集网络的前五个超参。</p></li>
<li><p><strong>collect_landscape</strong> (Union[dict, None]) - 收集创建loss地形图所需要的参数。</p>
<ul>
<li><p><strong>landscape_size</strong> (int) - 指定生成loss地形图的图像分辨率。例如：如果设置为128，则loss地形图的分辨率是128*128。注意：计算loss地形图的时间随着分辨率的增大而增加。默认值：40。可选值：3-256。</p></li>
<li><p><strong>unit</strong> (str) - 指定训练过程中保存checkpoint时，下方参数 <cite>intervals</cite> 以何种形式收集模型权重。例如：将 <cite>intervals</cite> 设置为[[1, 2, 3, 4]]，如果 <cite>unit</cite> 设置为step，则收集模型权重的频率单位为step，将保存1-4个step的模型权重，而 <cite>unit</cite> 设置为epoch，则将保存1-4个epoch的模型权重。默认值：step。可选值：epoch/step。</p></li>
<li><p><strong>create_landscape</strong> (dict) - 选择创建哪种类型的loss地形图，分为训练过程loss地形图（train）和训练结果loss地形图（result）。默认值：{“train”: True, “result”: True}。可选值：True/False。</p></li>
<li><p><strong>num_samples</strong> (int) - 创建loss地形图所使用的数据集的大小。例如：在图像数据集中，您可以设置 <cite>num_samples</cite> 是128，这意味着将有128张图片被用来创建loss地形图。注意：<cite>num_samples</cite> 越大，计算loss地形图时间越长。默认值：128。</p></li>
<li><p><strong>intervals</strong> (List[List[int]]) - 指定loss地形图的区间。例如：如果用户想要创建两张训练过程的loss地形图，分别为1-5epoch和6-10epoch，则用户可以设置[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]。注意：每个区间至少包含3个epoch。</p></li>
</ul>
</li>
</ul>
</li>
<li><p><strong>keep_default_action</strong> (bool) - 此字段影响 <cite>collect_specified_data</cite> 字段的收集行为。True：表示设置指定数据后，其他数据按默认设置收集。False：表示设置指定数据后，只收集指定数据，不收集其他数据。默认值：True。</p></li>
<li><p><strong>custom_lineage_data</strong> (Union[dict, None]) - 允许您自定义数据并将数据显示在MindInsight的lineage页面上。在自定义数据中，key支持str类型，value支持str、int和float类型。默认值：None，表示不存在自定义数据。</p></li>
<li><p><strong>collect_tensor_freq</strong> (Optional[int]) - 语义与 <cite>collect_freq</cite> 的相同，但仅控制TensorSummary。由于TensorSummary数据太大，无法与其他summary数据进行比较，因此此参数用于降低收集量。默认情况下，收集TensorSummary数据的最大step数量为20，但不会超过收集其他summary数据的step数量。例如，给定 <cite>collect_freq=10</cite> ，当总step数量为600时，TensorSummary将收集20个step，而收集其他summary数据时会收集61个step。但当总step数量为20时，TensorSummary和其他summary将收集3个step。另外请注意，在并行模式下，会平均分配总的step数量，这会影响TensorSummary收集的step的数量。默认值：None，表示要遵循上述规则。</p></li>
<li><p><strong>max_file_size</strong> (Optional[int]) - 可写入磁盘的每个文件的最大大小（以字节为单位）。例如，如果不大于4GB，则设置 <cite>max_file_size=4*1024**3</cite> 。默认值：None，表示无限制。</p></li>
<li><p><strong>export_options</strong> (Union[None, dict]) - 表示对导出的数据执行自定义操作。注：导出的文件的大小不受 <cite>max_file_size</cite> 的限制。您可以使用字典自定义导出的数据。例如，您可以设置{‘tensor_format’:’npy’}将tensor导出为 <cite>npy</cite> 文件。支持控制的数据如下所示。默认值：None，表示不导出数据。</p>
<ul>
<li><p><strong>tensor_format</strong> (Union[str, None]) - 自定义导出的tensor的格式。支持[“npy”, None]。默认值：None，表示不导出tensor。</p>
<ul>
<li><p><strong>npy</strong> - 将tensor导出为NPY文件。</p></li>
</ul>
</li>
</ul>
</li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError</strong> - 编译MindSpore时，设置 <cite>-s on</cite> 关闭了维测功能。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">context</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.callback</span> <span class="kn">import</span> <span class="n">SummaryCollector</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.nn</span> <span class="kn">import</span> <span class="n">Accuracy</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="c1"># If the device_target is GPU, set the device_target to &quot;GPU&quot;</span>
<span class="gp">... </span>    <span class="n">context</span><span class="o">.</span><span class="n">set_context</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="n">context</span><span class="o">.</span><span class="n">GRAPH_MODE</span><span class="p">,</span> <span class="n">device_target</span><span class="o">=</span><span class="s2">&quot;Ascend&quot;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">mnist_dataset_dir</span> <span class="o">=</span> <span class="s1">&#39;/path/to/mnist_dataset_directory&#39;</span>
<span class="gp">... </span>    <span class="c1"># The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py</span>
<span class="gp">... </span>    <span class="n">ds_train</span> <span class="o">=</span> <span class="n">create_dataset</span><span class="p">(</span><span class="n">mnist_dataset_dir</span><span class="p">,</span> <span class="mi">32</span><span class="p">)</span>
<span class="gp">... </span>    <span class="c1"># The detail of LeNet5 shown in model_zoo.official.cv.lenet.src.lenet.py</span>
<span class="gp">... </span>    <span class="n">network</span> <span class="o">=</span> <span class="n">LeNet5</span><span class="p">(</span><span class="mi">10</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">net_loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">sparse</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s2">&quot;mean&quot;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">net_opt</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">network</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="mf">0.01</span><span class="p">,</span> <span class="mf">0.9</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">network</span><span class="p">,</span> <span class="n">net_loss</span><span class="p">,</span> <span class="n">net_opt</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;Accuracy&quot;</span><span class="p">:</span> <span class="n">Accuracy</span><span class="p">()},</span> <span class="n">amp_level</span><span class="o">=</span><span class="s2">&quot;O2&quot;</span><span class="p">)</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="c1"># Simple usage:</span>
<span class="gp">... </span>    <span class="n">summary_collector</span> <span class="o">=</span> <span class="n">SummaryCollector</span><span class="p">(</span><span class="n">summary_dir</span><span class="o">=</span><span class="s1">&#39;./summary_dir&#39;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">ds_train</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="p">[</span><span class="n">summary_collector</span><span class="p">],</span> <span class="n">dataset_sink_mode</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="c1"># Do not collect metric and collect the first layer parameter, others are collected by default</span>
<span class="gp">... </span>    <span class="n">specified</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;collect_metric&#39;</span><span class="p">:</span> <span class="kc">False</span><span class="p">,</span> <span class="s1">&#39;histogram_regular&#39;</span><span class="p">:</span> <span class="s1">&#39;^conv1.*&#39;</span><span class="p">}</span>
<span class="gp">... </span>    <span class="n">summary_collector</span> <span class="o">=</span> <span class="n">SummaryCollector</span><span class="p">(</span><span class="n">summary_dir</span><span class="o">=</span><span class="s1">&#39;./summary_dir&#39;</span><span class="p">,</span> <span class="n">collect_specified_data</span><span class="o">=</span><span class="n">specified</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">ds_train</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="p">[</span><span class="n">summary_collector</span><span class="p">],</span> <span class="n">dataset_sink_mode</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.CheckpointConfig">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">CheckpointConfig</code><span class="sig-paren">(</span><em class="sig-param">save_checkpoint_steps=1</em>, <em class="sig-param">save_checkpoint_seconds=0</em>, <em class="sig-param">keep_checkpoint_max=5</em>, <em class="sig-param">keep_checkpoint_per_n_minutes=0</em>, <em class="sig-param">integrated_save=True</em>, <em class="sig-param">async_save=False</em>, <em class="sig-param">saved_network=None</em>, <em class="sig-param">append_info=None</em>, <em class="sig-param">enc_key=None</em>, <em class="sig-param">enc_mode='AES-GCM'</em>, <em class="sig-param">exception_save=False</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig" title="Permalink to this definition">¶</a></dt>
<dd><p>保存checkpoint时的配置策略。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>在训练过程中，如果数据集是通过数据通道传输的，建议将 <cite>save_checkpoint_steps</cite> 设为循环下沉step数量的整数倍数，否则，保存checkpoint的时机可能会有偏差。建议同时只设置一种触发保存checkpoint策略和一种保留checkpoint文件总数策略。如果同时设置了 <cite>save_checkpoint_steps</cite> 和 <cite>save_checkpoint_seconds</cite> ，则 <cite>save_checkpoint_seconds</cite> 无效。如果同时设置了 <cite>keep_checkpoint_max</cite> 和 <cite>keep_checkpoint_per_n_minutes</cite> ，则 <cite>keep_checkpoint_per_n_minutes</cite> 无效。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>save_checkpoint_steps</strong> (int) - 每隔多少个step保存一次checkpoint。默认值：1。</p></li>
<li><p><strong>save_checkpoint_seconds</strong> (int) - 每隔多少秒保存一次checkpoint。不能同时与 <cite>save_checkpoint_steps</cite> 一起使用。默认值：0。</p></li>
<li><p><strong>keep_checkpoint_max</strong> (int) - 最多保存多少个checkpoint文件。默认值：5。</p></li>
<li><p><strong>keep_checkpoint_per_n_minutes</strong> (int) - 每隔多少分钟保存一个checkpoint文件。不能同时与 <cite>keep_checkpoint_max</cite> 一起使用。默认值：0。</p></li>
<li><p><strong>integrated_save</strong> (bool) - 在自动并行场景下，是否合并保存拆分后的Tensor。合并保存功能仅支持在自动并行场景中使用，在手动并行场景中不支持。默认值：True。</p></li>
<li><p><strong>async_save</strong> (bool) - 是否异步执行保存checkpoint文件。默认值：False。</p></li>
<li><p><strong>saved_network</strong> (Cell) - 保存在checkpoint文件中的网络。如果 <cite>saved_network</cite> 没有被训练，则保存 <cite>saved_network</cite> 的初始值。默认值：None。</p></li>
<li><p><strong>append_info</strong> (list) - 保存在checkpoint文件中的信息。支持”epoch_num”、”step_num”和dict类型。dict的key必须是str，dict的value必须是int、float或bool中的一个。默认值：None。</p></li>
<li><p><strong>enc_key</strong> (Union[None, bytes]) - 用于加密的字节类型key。如果值为None，则不需要加密。默认值：None。</p></li>
<li><p><strong>enc_mode</strong> (str) - 仅当 <cite>enc_key</cite> 不设为None时，该参数有效。指定了加密模式，目前支持AES-GCM和AES-CBC。默认值：AES-GCM。</p></li>
<li><p><strong>exception_save</strong> (bool) - 当有异常发生时，是否保存当前checkpoint文件。默认值：False。</p></li>
</ul>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>ValueError</strong> - 输入参数的类型不正确。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.callback</span> <span class="kn">import</span> <span class="n">ModelCheckpoint</span><span class="p">,</span> <span class="n">CheckpointConfig</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.common.initializer</span> <span class="kn">import</span> <span class="n">Normal</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">LeNet5</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Cell</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">num_class</span><span class="o">=</span><span class="mi">10</span><span class="p">,</span> <span class="n">num_channel</span><span class="o">=</span><span class="mi">1</span><span class="p">):</span>
<span class="gp">... </span>        <span class="nb">super</span><span class="p">(</span><span class="n">LeNet5</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="n">num_channel</span><span class="p">,</span> <span class="mi">6</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;valid&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;valid&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Dense</span><span class="p">(</span><span class="mi">16</span> <span class="o">*</span> <span class="mi">5</span> <span class="o">*</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">120</span><span class="p">,</span> <span class="n">weight_init</span><span class="o">=</span><span class="n">Normal</span><span class="p">(</span><span class="mf">0.02</span><span class="p">))</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Dense</span><span class="p">(</span><span class="mi">120</span><span class="p">,</span> <span class="mi">84</span><span class="p">,</span> <span class="n">weight_init</span><span class="o">=</span><span class="n">Normal</span><span class="p">(</span><span class="mf">0.02</span><span class="p">))</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Dense</span><span class="p">(</span><span class="mi">84</span><span class="p">,</span> <span class="n">num_class</span><span class="p">,</span> <span class="n">weight_init</span><span class="o">=</span><span class="n">Normal</span><span class="p">(</span><span class="mf">0.02</span><span class="p">))</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">relu</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">ReLU</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">max_pool2d</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">MaxPool2d</span><span class="p">(</span><span class="n">kernel_size</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">stride</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">flatten</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Flatten</span><span class="p">()</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_pool2d</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">)))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_pool2d</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">)))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">flatten</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="gp">... </span>        <span class="k">return</span> <span class="n">x</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">LeNet5</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">sparse</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="mf">0.01</span><span class="p">,</span> <span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">data_path</span> <span class="o">=</span> <span class="s1">&#39;./MNIST_Data&#39;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_dataset</span><span class="p">(</span><span class="n">data_path</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">config</span> <span class="o">=</span> <span class="n">CheckpointConfig</span><span class="p">(</span><span class="n">saved_network</span><span class="o">=</span><span class="n">net</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">ckpoint_cb</span> <span class="o">=</span> <span class="n">ModelCheckpoint</span><span class="p">(</span><span class="n">prefix</span><span class="o">=</span><span class="s1">&#39;LeNet5&#39;</span><span class="p">,</span> <span class="n">directory</span><span class="o">=</span><span class="s1">&#39;./checkpoint&#39;</span><span class="p">,</span> <span class="n">config</span><span class="o">=</span><span class="n">config</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="n">dataset</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="n">ckpoint_cb</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.append_dict">
<em class="property">property </em><code class="sig-name descname">append_dict</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.append_dict" title="Permalink to this definition">¶</a></dt>
<dd><p>获取需要额外保存到checkpoint中的字典的值。</p>
<p><strong>返回：</strong></p>
<p>Dict: 字典中的值。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.async_save">
<em class="property">property </em><code class="sig-name descname">async_save</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.async_save" title="Permalink to this definition">¶</a></dt>
<dd><p>获取是否异步保存checkpoint。</p>
<p><strong>返回：</strong></p>
<p>Bool: 是否异步保存checkpoint。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.enc_key">
<em class="property">property </em><code class="sig-name descname">enc_key</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.enc_key" title="Permalink to this definition">¶</a></dt>
<dd><p>获取加密的key值。</p>
<p><strong>返回：</strong></p>
<p>(None, bytes): 加密的key值。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.enc_mode">
<em class="property">property </em><code class="sig-name descname">enc_mode</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.enc_mode" title="Permalink to this definition">¶</a></dt>
<dd><p>获取加密模式。</p>
<p><strong>返回：</strong></p>
<p>str: 加密模式。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.get_checkpoint_policy">
<code class="sig-name descname">get_checkpoint_policy</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.get_checkpoint_policy" title="Permalink to this definition">¶</a></dt>
<dd><p>获取checkpoint的保存策略。</p>
<p><strong>返回：</strong></p>
<p>Dict: checkpoint的保存策略。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.integrated_save">
<em class="property">property </em><code class="sig-name descname">integrated_save</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.integrated_save" title="Permalink to this definition">¶</a></dt>
<dd><p>获取是否合并保存拆分后的Tensor。</p>
<p><strong>返回：</strong></p>
<p>Bool: 是否合并保存拆分后的Tensor。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.keep_checkpoint_max">
<em class="property">property </em><code class="sig-name descname">keep_checkpoint_max</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.keep_checkpoint_max" title="Permalink to this definition">¶</a></dt>
<dd><p>获取最多保存checkpoint文件的数量。</p>
<p><strong>返回：</strong></p>
<p>Int: 最多保存checkpoint文件的数量。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.keep_checkpoint_per_n_minutes">
<em class="property">property </em><code class="sig-name descname">keep_checkpoint_per_n_minutes</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.keep_checkpoint_per_n_minutes" title="Permalink to this definition">¶</a></dt>
<dd><p>获取每隔多少分钟保存一个checkpoint文件。</p>
<p><strong>返回：</strong></p>
<p>Int: 每隔多少分钟保存一个checkpoint文件。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.saved_network">
<em class="property">property </em><code class="sig-name descname">saved_network</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.saved_network" title="Permalink to this definition">¶</a></dt>
<dd><p>获取需要保存的网络。</p>
<p><strong>返回：</strong></p>
<p>Cell: 需要保存的网络。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.save_checkpoint_seconds">
<em class="property">property </em><code class="sig-name descname">save_checkpoint_seconds</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.save_checkpoint_seconds" title="Permalink to this definition">¶</a></dt>
<dd><p>获取每隔多少秒保存一次checkpoint文件。</p>
<p><strong>返回：</strong></p>
<p>Int: 每隔多少秒保存一次checkpoint文件。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.CheckpointConfig.save_checkpoint_steps">
<em class="property">property </em><code class="sig-name descname">save_checkpoint_steps</code><a class="headerlink" href="#mindspore.train.callback.CheckpointConfig.save_checkpoint_steps" title="Permalink to this definition">¶</a></dt>
<dd><p>获取每隔多少个step保存一次checkpoint文件。</p>
<p><strong>返回：</strong></p>
<p>Int: 每隔多少个step保存一次checkpoint文件。</p>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.RunContext">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">RunContext</code><span class="sig-paren">(</span><em class="sig-param">original_args</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.RunContext" title="Permalink to this definition">¶</a></dt>
<dd><p>提供模型的相关信息。</p>
<p>在Model方法里提供模型的相关信息。
回调函数可以调用 <cite>request_stop()</cite> 方法来停止迭代。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>original_args</strong> (dict) - 模型的相关信息。</p></li>
</ul>
<dl class="method">
<dt id="mindspore.train.callback.RunContext.get_stop_requested">
<code class="sig-name descname">get_stop_requested</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.RunContext.get_stop_requested" title="Permalink to this definition">¶</a></dt>
<dd><p>获取是否停止训练的标志。</p>
<p><strong>返回：</strong></p>
<p>bool，如果为True，则 <cite>Model.train()</cite> 停止迭代。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.RunContext.original_args">
<code class="sig-name descname">original_args</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.RunContext.original_args" title="Permalink to this definition">¶</a></dt>
<dd><p>获取模型相关信息的对象。</p>
<p><strong>返回：</strong></p>
<p>dict，含有模型的相关信息的对象。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.RunContext.request_stop">
<code class="sig-name descname">request_stop</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.RunContext.request_stop" title="Permalink to this definition">¶</a></dt>
<dd><p>在训练期间设置停止请求。</p>
<p>可以使用此函数请求停止训练。 <cite>Model.train()</cite> 会检查是否调用此函数。</p>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.LearningRateScheduler">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">LearningRateScheduler</code><span class="sig-paren">(</span><em class="sig-param">learning_rate_function</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.LearningRateScheduler" title="Permalink to this definition">¶</a></dt>
<dd><p>用于在训练期间更改学习率。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>learning_rate_function</strong> (Function) - 在训练期间更改学习率的函数。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.callback</span> <span class="kn">import</span> <span class="n">LearningRateScheduler</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">def</span> <span class="nf">learning_rate_function</span><span class="p">(</span><span class="n">lr</span><span class="p">,</span> <span class="n">cur_step_num</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">if</span> <span class="n">cur_step_num</span><span class="o">%</span><span class="mi">1000</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="gp">... </span>        <span class="n">lr</span> <span class="o">=</span> <span class="n">lr</span><span class="o">*</span><span class="mf">0.1</span>
<span class="gp">... </span>    <span class="k">return</span> <span class="n">lr</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">lr</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">momentum</span> <span class="o">=</span> <span class="mf">0.9</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="n">momentum</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">)</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">(</span><span class="s2">&quot;custom_dataset_path&quot;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">dataset</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="p">[</span><span class="n">LearningRateScheduler</span><span class="p">(</span><span class="n">learning_rate_function</span><span class="p">)],</span>
<span class="gp">... </span>            <span class="n">dataset_sink_mode</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.train.callback.LearningRateScheduler.step_end">
<code class="sig-name descname">step_end</code><span class="sig-paren">(</span><em class="sig-param">run_context</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.LearningRateScheduler.step_end" title="Permalink to this definition">¶</a></dt>
<dd><p>在step结束时更改学习率。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>run_context</strong> (RunContext) - 包含模型的一些基本信息。</p></li>
</ul>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.callback.SummaryLandscape">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.callback.</code><code class="sig-name descname">SummaryLandscape</code><span class="sig-paren">(</span><em class="sig-param">summary_dir</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.SummaryLandscape" title="Permalink to this definition">¶</a></dt>
<dd><p>SummaryLandscape可以帮助您收集loss地形图的信息。通过计算loss，可以在PCA（Principal Component Analysis）方向或者随机方向创建地形图。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<ul class="simple">
<li><p>SummaryLandscape仅支持Linux系统。</p></li>
</ul>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>summary_dir</strong> (str) - 该路径将被用来保存创建地形图所使用的数据。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">context</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.callback</span> <span class="kn">import</span> <span class="n">SummaryCollector</span><span class="p">,</span> <span class="n">SummaryLandscape</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.nn</span> <span class="kn">import</span> <span class="n">Loss</span><span class="p">,</span> <span class="n">Accuracy</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">&#39;__main__&#39;</span><span class="p">:</span>
<span class="gp">... </span>    <span class="c1"># If the device_target is Ascend, set the device_target to &quot;Ascend&quot;</span>
<span class="gp">... </span>    <span class="n">context</span><span class="o">.</span><span class="n">set_context</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="n">context</span><span class="o">.</span><span class="n">GRAPH_MODE</span><span class="p">,</span> <span class="n">device_target</span><span class="o">=</span><span class="s2">&quot;GPU&quot;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">mnist_dataset_dir</span> <span class="o">=</span> <span class="s1">&#39;/path/to/mnist_dataset_directory&#39;</span>
<span class="gp">... </span>    <span class="c1"># The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py</span>
<span class="gp">... </span>    <span class="n">ds_train</span> <span class="o">=</span> <span class="n">create_dataset</span><span class="p">(</span><span class="n">mnist_dataset_dir</span><span class="p">,</span> <span class="mi">32</span><span class="p">)</span>
<span class="gp">... </span>    <span class="c1"># The detail of LeNet5 shown in model_zoo.official.cv.lenet.src.lenet.py</span>
<span class="gp">... </span>    <span class="n">network</span> <span class="o">=</span> <span class="n">LeNet5</span><span class="p">(</span><span class="mi">10</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">net_loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">sparse</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s2">&quot;mean&quot;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">net_opt</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">network</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="mf">0.01</span><span class="p">,</span> <span class="mf">0.9</span><span class="p">)</span>
<span class="gp">... </span>    <span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">network</span><span class="p">,</span> <span class="n">net_loss</span><span class="p">,</span> <span class="n">net_opt</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;Accuracy&quot;</span><span class="p">:</span> <span class="n">Accuracy</span><span class="p">()})</span>
<span class="gp">... </span>    <span class="c1"># Simple usage for collect landscape information:</span>
<span class="gp">... </span>    <span class="n">interval_1</span> <span class="o">=</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">]</span>
<span class="gp">... </span>    <span class="n">summary_collector</span> <span class="o">=</span> <span class="n">SummaryCollector</span><span class="p">(</span><span class="n">summary_dir</span><span class="o">=</span><span class="s1">&#39;./summary/lenet_interval_1&#39;</span><span class="p">,</span>
<span class="gp">... </span>                                         <span class="n">collect_specified_data</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;collect_landscape&#39;</span><span class="p">:{</span><span class="s2">&quot;landscape_size&quot;</span><span class="p">:</span> <span class="mi">4</span><span class="p">,</span>
<span class="gp">... </span>                                                                                       <span class="s2">&quot;unit&quot;</span><span class="p">:</span> <span class="s2">&quot;step&quot;</span><span class="p">,</span>
<span class="gp">... </span>                                                                         <span class="s2">&quot;create_landscape&quot;</span><span class="p">:{</span><span class="s2">&quot;train&quot;</span><span class="p">:</span><span class="kc">True</span><span class="p">,</span>
<span class="gp">... </span>                                                                                            <span class="s2">&quot;result&quot;</span><span class="p">:</span><span class="kc">False</span><span class="p">},</span>
<span class="gp">... </span>                                                                         <span class="s2">&quot;num_samples&quot;</span><span class="p">:</span> <span class="mi">2048</span><span class="p">,</span>
<span class="gp">... </span>                                                                         <span class="s2">&quot;intervals&quot;</span><span class="p">:</span> <span class="p">[</span><span class="n">interval_1</span><span class="p">]}</span>
<span class="gp">... </span>                                                                   <span class="p">})</span>
<span class="gp">... </span>    <span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">ds_train</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="p">[</span><span class="n">summary_collector</span><span class="p">],</span> <span class="n">dataset_sink_mode</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="c1"># Simple usage for visualization landscape:</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">callback_fn</span><span class="p">():</span>
<span class="gp">... </span>        <span class="n">network</span> <span class="o">=</span> <span class="n">LeNet5</span><span class="p">(</span><span class="mi">10</span><span class="p">)</span>
<span class="gp">... </span>        <span class="n">net_loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">sparse</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s2">&quot;mean&quot;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="n">metrics</span> <span class="o">=</span> <span class="p">{</span><span class="s2">&quot;Loss&quot;</span><span class="p">:</span> <span class="n">Loss</span><span class="p">()}</span>
<span class="gp">... </span>        <span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">network</span><span class="p">,</span> <span class="n">net_loss</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="n">metrics</span><span class="p">)</span>
<span class="gp">... </span>        <span class="n">mnist_dataset_dir</span> <span class="o">=</span> <span class="s1">&#39;/path/to/mnist_dataset_directory&#39;</span>
<span class="gp">... </span>        <span class="n">ds_eval</span> <span class="o">=</span> <span class="n">create_dataset</span><span class="p">(</span><span class="n">mnist_dataset_dir</span><span class="p">,</span> <span class="mi">32</span><span class="p">)</span>
<span class="gp">... </span>        <span class="k">return</span> <span class="n">model</span><span class="p">,</span> <span class="n">network</span><span class="p">,</span> <span class="n">ds_eval</span><span class="p">,</span> <span class="n">metrics</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="n">summary_landscape</span> <span class="o">=</span> <span class="n">SummaryLandscape</span><span class="p">(</span><span class="s1">&#39;./summary/lenet_interval_1&#39;</span><span class="p">)</span>
<span class="gp">... </span>    <span class="c1"># parameters of collect_landscape can be modified or unchanged</span>
<span class="gp">... </span>    <span class="n">summary_landscape</span><span class="o">.</span><span class="n">gen_landscapes_with_multi_process</span><span class="p">(</span><span class="n">callback_fn</span><span class="p">,</span>
<span class="gp">... </span>                                                       <span class="n">collect_landscape</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;landscape_size&quot;</span><span class="p">:</span> <span class="mi">4</span><span class="p">,</span>
<span class="gp">... </span>                                                                        <span class="s2">&quot;create_landscape&quot;</span><span class="p">:{</span><span class="s2">&quot;train&quot;</span><span class="p">:</span><span class="kc">False</span><span class="p">,</span>
<span class="gp">... </span>                                                                                           <span class="s2">&quot;result&quot;</span><span class="p">:</span><span class="kc">False</span><span class="p">},</span>
<span class="gp">... </span>                                                                         <span class="s2">&quot;num_samples&quot;</span><span class="p">:</span> <span class="mi">2048</span><span class="p">,</span>
<span class="gp">... </span>                                                                         <span class="s2">&quot;intervals&quot;</span><span class="p">:</span> <span class="p">[</span><span class="n">interval_1</span><span class="p">]},</span>
<span class="gp">... </span>                                                        <span class="n">device_ids</span><span class="o">=</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.train.callback.SummaryLandscape.clean_ckpt">
<code class="sig-name descname">clean_ckpt</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.SummaryLandscape.clean_ckpt" title="Permalink to this definition">¶</a></dt>
<dd><p>清理checkpoint。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.train.callback.SummaryLandscape.gen_landscapes_with_multi_process">
<code class="sig-name descname">gen_landscapes_with_multi_process</code><span class="sig-paren">(</span><em class="sig-param">callback_fn</em>, <em class="sig-param">collect_landscape=None</em>, <em class="sig-param">device_ids=None</em>, <em class="sig-param">output=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.callback.SummaryLandscape.gen_landscapes_with_multi_process" title="Permalink to this definition">¶</a></dt>
<dd><p>使用多进程来生成地形图。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>callback_fn</strong> (python function) - Python函数对象，用户需要写一个没有输入的函数，返回值要求如下。</p>
<ul>
<li><p>mindspore.train.Model：用户的模型。</p></li>
<li><p>mindspore.nn.Cell：用户的网络。</p></li>
<li><p>mindspore.dataset：创建loss地形图所需要的用户数据集。</p></li>
<li><p>mindspore.nn.Metrics：用户的评估指标。</p></li>
</ul>
</li>
<li><p><strong>collect_landscape</strong> (Union[dict, None]) - 创建loss地形图所用的参数含义与SummaryCollector同名字段一致。此处设置的目的是允许用户可以自由修改创建loss地形图参数。默认值：None。</p>
<ul>
<li><p><strong>landscape_size</strong> (int) - 指定生成loss地形图的图像分辨率。例如：如果设置为128，则loss地形图的分辨率是128*128。计算loss地形图的时间随着分辨率的增大而增加。默认值：40。可选值：3-256。</p></li>
<li><p><strong>create_landscape</strong> (dict) - 选择创建哪种类型的loss地形图，分为训练过程loss地形图（train）和训练结果loss地形图（result）。默认值：{“train”: True, “result”: True}。可选值：True/False。</p></li>
<li><p><strong>num_samples</strong> (int) - 创建loss地形图所使用的数据集的大小。例如：在图像数据集中，您可以设置 <cite>num_samples</cite> 是128，这意味着将有128张图片被用来创建loss地形图。注意：<cite>num_samples</cite> 越大，计算loss地形图时间越长。默认值：128。</p></li>
<li><p><strong>intervals</strong> (List[List[int]]) - 指定创建loss地形图所需要的checkpoint区间。例如：如果用户想要创建两张训练过程的loss地形图，分别为1-5epoch和6-10epoch，则用户可以设置[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]。注意：每个区间至少包含3个epoch。</p></li>
</ul>
</li>
<li><p><strong>device_ids</strong> (List(int)) - 指定创建loss地形图所使用的目标设备的ID。例如：[0, 1]表示使用设备0和设备1来创建loss地形图。默认值：None。</p></li>
<li><p><strong>output</strong> (str) - 指定保存loss地形图的路径。默认值：None。默认保存路径与summary文件相同。</p></li>
</ul>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="mindspore-train-train-thor">
<h2>mindspore.train.train_thor<a class="headerlink" href="#mindspore-train-train-thor" title="Permalink to this headline">¶</a></h2>
<p>转换为二阶相关的类和函数。</p>
<dl class="class">
<dt id="mindspore.train.train_thor.ConvertModelUtils">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.train_thor.</code><code class="sig-name descname">ConvertModelUtils</code><a class="headerlink" href="#mindspore.train.train_thor.ConvertModelUtils" title="Permalink to this definition">¶</a></dt>
<dd><p>该接口用于增加计算图，提升二阶算法THOR运行时的性能。</p>
<dl class="method">
<dt id="mindspore.train.train_thor.ConvertModelUtils.convert_to_thor_model">
<em class="property">static </em><code class="sig-name descname">convert_to_thor_model</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">network</em>, <em class="sig-param">loss_fn=None</em>, <em class="sig-param">optimizer=None</em>, <em class="sig-param">metrics=None</em>, <em class="sig-param">amp_level='O0'</em>, <em class="sig-param">loss_scale_manager=None</em>, <em class="sig-param">keep_batchnorm_fp32=False</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.train_thor.ConvertModelUtils.convert_to_thor_model" title="Permalink to this definition">¶</a></dt>
<dd><p>该接口用于增加计算图，提升二阶算法THOR运行时的性能。</p>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>model</strong> (Object) - 用于训练的高级API。 <cite>Model</cite> 将图层分组到具有训练特征的对象中。</p></li>
<li><p><strong>network</strong> (Cell) - 训练网络。</p></li>
<li><p><strong>loss_fn</strong> (Cell) - 目标函数。默认值：None。</p></li>
<li><p><strong>optimizer</strong> (Cell) - 用于更新权重的优化器。默认值：None。</p></li>
<li><p><strong>metrics</strong> (Union[dict, set]) - 在训练期间由模型评估的词典或一组度量。例如：{‘accuracy’, ‘recall’}。默认值：None。</p></li>
<li><p><strong>amp_level</strong> (str) - 混合精度训练的级别。支持[“O0”, “O2”, “O3”, “auto”]。默认值：”O0”。</p>
<ul>
<li><p><strong>O0</strong> - 不改变。</p></li>
<li><p><strong>O2</strong> - 将网络转换为float16，保持BatchNorm在float32中运行，并使用动态loss scale。</p></li>
<li><p><strong>O3</strong> - 将网络强制转换为float16，并使用附加属性 <cite>keep_batchnorm_fp32=False</cite> 。</p></li>
<li><p><strong>auto</strong> - 在不同设备中，将级别设置为建议级别。GPU上建议使用O2，Ascend上建议使用O3。建议级别基于专家经验，不能总是一概而论。用户应指定特殊网络的级别。</p></li>
</ul>
</li>
<li><p><strong>loss_scale_manager</strong> (Union[None, LossScaleManager]) - 如果为None，则不对loss进行缩放；否则将通过LossScaleManager对loss进行缩放，此时优化器不能为None。这是一个关键参数，例如可以通过 <cite>loss_scale_manager=None</cite> 来设置该值。默认值：None。</p></li>
<li><p><strong>keep_batchnorm_fp32</strong> (bool) - 保持BN在 <cite>float32</cite> 中运行。如果为True，则将覆盖之前的级别设置。默认值：False。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>model (Object) - 用于训练的高级API。 <cite>Model</cite> 将图层分组到具有训练特征的对象中。</p>
<p><strong>支持平台：</strong></p>
<p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">nn</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Tensor</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.nn</span> <span class="kn">import</span> <span class="n">thor</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">FixedLossScaleManager</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.callback</span> <span class="kn">import</span> <span class="n">LossMonitor</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.train.train_thor</span> <span class="kn">import</span> <span class="n">ConvertModelUtils</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">temp</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">([</span><span class="mf">4e-4</span><span class="p">,</span> <span class="mf">1e-4</span><span class="p">,</span> <span class="mf">1e-5</span><span class="p">,</span> <span class="mf">1e-5</span><span class="p">],</span> <span class="n">mstype</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">opt</span> <span class="o">=</span> <span class="n">thor</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="n">temp</span><span class="p">,</span> <span class="n">damping</span><span class="o">=</span><span class="n">temp</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">loss_scale</span><span class="o">=</span><span class="mi">128</span><span class="p">,</span> <span class="n">frequency</span><span class="o">=</span><span class="mi">4</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">(</span><span class="n">sparse</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">reduction</span><span class="o">=</span><span class="s1">&#39;mean&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_scale</span> <span class="o">=</span> <span class="n">FixedLossScaleManager</span><span class="p">(</span><span class="mi">128</span><span class="p">,</span> <span class="n">drop_overflow_update</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">opt</span><span class="p">,</span> <span class="n">loss_scale_manager</span><span class="o">=</span><span class="n">loss_scale</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;acc&#39;</span><span class="p">},</span>
<span class="gp">... </span>              <span class="n">amp_level</span><span class="o">=</span><span class="s2">&quot;O2&quot;</span><span class="p">,</span> <span class="n">keep_batchnorm_fp32</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">ConvertModelUtils</span><span class="o">.</span><span class="n">convert_to_thor_model</span><span class="p">(</span><span class="n">model</span><span class="o">=</span><span class="n">model</span><span class="p">,</span> <span class="n">network</span><span class="o">=</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">opt</span><span class="p">,</span>
<span class="gp">... </span>                                                <span class="n">loss_scale_manager</span><span class="o">=</span><span class="n">loss_scale</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;acc&#39;</span><span class="p">},</span>
<span class="gp">... </span>                                                <span class="n">amp_level</span><span class="o">=</span><span class="s2">&quot;O2&quot;</span><span class="p">,</span> <span class="n">keep_batchnorm_fp32</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_cb</span> <span class="o">=</span> <span class="n">LossMonitor</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">dataset</span><span class="p">,</span> <span class="n">callbacks</span><span class="o">=</span><span class="n">loss_cb</span><span class="p">,</span> <span class="n">sink_size</span><span class="o">=</span><span class="mi">4</span><span class="p">,</span> <span class="n">dataset_sink_mode</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

</dd></dl>

<dl class="class">
<dt id="mindspore.train.train_thor.ConvertNetUtils">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.train.train_thor.</code><code class="sig-name descname">ConvertNetUtils</code><a class="headerlink" href="#mindspore.train.train_thor.ConvertNetUtils" title="Permalink to this definition">¶</a></dt>
<dd><p>将网络转换为thor层网络，用于计算并存储二阶信息矩阵。</p>
<dl class="method">
<dt id="mindspore.train.train_thor.ConvertNetUtils.convert_to_thor_net">
<code class="sig-name descname">convert_to_thor_net</code><span class="sig-paren">(</span><em class="sig-param">net</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.train.train_thor.ConvertNetUtils.convert_to_thor_net" title="Permalink to this definition">¶</a></dt>
<dd><p>该接口用于将网络转换为thor层网络，以计算并存储二阶信息矩阵。</p>
<div class="admonition note">
<p class="admonition-title">说明</p>
<p>此接口由二阶优化器thor自动调用。</p>
</div>
<p><strong>参数：</strong></p>
<p><strong>net</strong> (Cell) - 由二阶优化器thor训练的网络。</p>
<p><strong>支持平台：</strong></p>
<p><code class="docutils literal notranslate"><span class="pre">Ascend</span></code> <code class="docutils literal notranslate"><span class="pre">GPU</span></code></p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">ConvertNetUtils</span><span class="p">()</span><span class="o">.</span><span class="n">convert_to_thor_net</span><span class="p">(</span><span class="n">net</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

</dd></dl>

</div>
</div>


           </div>
           
          </div>
          <footer>
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
        <a href="mindspore.boost.html" class="btn btn-neutral float-right" title="mindspore.boost" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
        <a href="scipy/mindspore.scipy.sparse.linalg.gmres.html" class="btn btn-neutral float-left" title="mindspore.scipy.sparse.linalg.gmres" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
    </div>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>