

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" />
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  
  <title>mindspore.Model &mdash; MindSpore master documentation</title>
  

  
  <link rel="stylesheet" href="../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />

  
  

  
  

  

  
  <!--[if lt IE 9]>
    <script src="../../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../../" src="../../_static/documentation_options.js"></script>
        <script src="../../_static/jquery.js"></script>
        <script src="../../_static/underscore.js"></script>
        <script src="../../_static/doctools.js"></script>
        <script src="../../_static/language_data.js"></script>
        <script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../../_static/js/theme.js"></script>

    
    <link rel="index" title="Index" href="../../genindex.html" />
    <link rel="search" title="Search" href="../../search.html" />
    <link rel="next" title="mindspore.DatasetHelper" href="mindspore.DatasetHelper.html" />
    <link rel="prev" title="mindspore.get_seed" href="mindspore.get_seed.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../index.html" class="icon icon-home"> MindSpore
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul class="current">
<li class="toctree-l1 current"><a class="reference internal" href="../mindspore.html">mindspore</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id1">张量</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id2">参数</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id3">数据类型</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id4">随机种子</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="../mindspore.html#id5">模型</a><ul class="current">
<li class="toctree-l3 current"><a class="current reference internal" href="#">mindspore.Model</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id6">数据处理工具</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id7">混合精度管理</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id8">序列化</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id9">即时编译</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id10">日志</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id11">自动混合精度</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id12">安装验证</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id13">调试</a></li>
<li class="toctree-l2"><a class="reference internal" href="../mindspore.html#id14">内存回收</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mindspore.boost.html">mindspore.boost</a></li>
</ul>
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../index.html" class="icon icon-home"></a> &raquo;</li>
        
          <li><a href="../mindspore.html">mindspore</a> &raquo;</li>
        
      <li>mindspore.Model</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
          
            <a href="../../_sources/api_python/mindspore/mindspore.Model.rst.txt" rel="nofollow"> View page source</a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="mindspore-model">
<h1>mindspore.Model<a class="headerlink" href="#mindspore-model" title="Permalink to this headline">¶</a></h1>
<dl class="class">
<dt id="mindspore.Model">
<em class="property">class </em><code class="sig-prename descclassname">mindspore.</code><code class="sig-name descname">Model</code><span class="sig-paren">(</span><em class="sig-param">network</em>, <em class="sig-param">loss_fn=None</em>, <em class="sig-param">optimizer=None</em>, <em class="sig-param">metrics=None</em>, <em class="sig-param">eval_network=None</em>, <em class="sig-param">eval_indexes=None</em>, <em class="sig-param">amp_level=&quot;O0&quot;</em>, <em class="sig-param">boost_level=&quot;O0&quot;</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model" title="Permalink to this definition">¶</a></dt>
<dd><p>模型训练或推理的高阶接口。 <cite>Model</cite> 会根据用户传入的参数封装可训练或推理的实例。</p>
<p>如果使用混合精度功能，需要同时设置 <cite>optimizer</cite> 参数，否则混合精度功能不生效。
当使用混合精度时，优化器中的 <cite>global_step</cite> 可能与模型中的 <cite>cur_step_num</cite> 不同。</p>
<p><strong>参数：</strong></p>
<ul>
<li><p><strong>network</strong> (Cell) – 用于训练或推理的神经网络。</p></li>
<li><p><strong>loss_fn</strong> (Cell) - 损失函数。如果 <cite>loss_fn</cite> 为None，<cite>network</cite> 中需要进行损失函数计算，必要时也需要进行并行计算。默认值：None。</p></li>
<li><p><strong>optimizer</strong> (Cell) - 用于更新网络权重的优化器。如果 <cite>optimizer</cite> 为None， <cite>network</cite> 中需要进行反向传播和网络权重更新。默认值：None。</p></li>
<li><p><strong>metrics</strong> (Union[dict, set]) - 用于模型评估的一组评价函数。例如：{‘accuracy’, ‘recall’}。默认值：None。</p></li>
<li><p><strong>eval_network</strong> (Cell) - 用于评估的神经网络。未定义情况下，<cite>Model</cite> 会使用 <cite>network</cite> 和 <cite>loss_fn</cite> 封装一个 <cite>eval_network</cite> 。默认值：None。</p></li>
<li><p><strong>eval_indexes</strong> (list) - 在定义 <cite>eval_network</cite> 的情况下使用。如果 <cite>eval_indexes</cite> 为默认值None，<cite>Model</cite> 会将 <cite>eval_network</cite> 的所有输出传给 <cite>metrics</cite> 。如果配置 <cite>eval_indexes</cite> ，必须包含三个元素，分别为损失值、预测值和标签在 <cite>eval_network</cite> 输出中的位置，此时，损失值将传给损失评价函数，预测值和标签将传给其他评价函数。推荐使用评价函数的 <cite>mindspore.nn.Metric.set_indexes</cite> 代替 <cite>eval_indexes</cite> 。默认值：None。</p></li>
<li><p><strong>amp_level</strong> (str) - <cite>mindspore.build_train_network</cite> 的可选参数 <cite>level</cite> ， <cite>level</cite> 为混合精度等级，该参数支持[“O0”, “O2”, “O3”, “auto”]。默认值：“O0”。</p>
<ul class="simple">
<li><p>“O0”: 不变化。</p></li>
<li><p>“O2”: 将网络精度转为float16，BatchNorm保持float32精度，使用动态调整损失缩放系数（loss scale）的策略。</p></li>
<li><p>“O3”: 将网络精度（包括BatchNorm）转为float16，不使用损失缩放策略。</p></li>
<li><p>“auto”: 为不同处理器设置专家推荐的混合精度等级，如在GPU上设为“O2”，在Ascend上设为“O3”。该设置方式可能在部分场景下不适用，建议用户根据具体的网络模型自定义设置 <cite>amp_level</cite> 。</p></li>
</ul>
<p>在GPU上建议使用“O2”，在Ascend上建议使用“O3”。
通过 <cite>kwargs</cite> 设置 <cite>keep_batchnorm_fp32</cite> ，可修改BatchNorm的精度策略， <cite>keep_batchnorm_fp32</cite> 必须为bool类型；通过 <cite>kwargs</cite> 设置 <cite>loss_scale_manager</cite> 可修改损失缩放策略，<cite>loss_scale_manager</cite> 必须为 <a class="reference internal" href="mindspore.LossScaleManager.html#mindspore.LossScaleManager" title="mindspore.LossScaleManager"><code class="xref py py-class docutils literal notranslate"><span class="pre">mindspore.LossScaleManager</span></code></a> 的子类，
关于 <cite>amp_level</cite> 详见 <cite>mindspore.build_train_network</cite> 。</p>
</li>
<li><p><strong>boost_level</strong> (str) – <cite>mindspore.boost</cite> 的可选参数，为boost模式训练等级。支持[“O0”, “O1”, “O2”]。默认值：“O0”。</p>
<ul class="simple">
<li><p>“O0”: 不变化。</p></li>
<li><p>“O1”: 启用boost模式, 性能将提升约20%, 准确率保持不变。</p></li>
<li><p>“O2”: 启用boost模式, 性能将提升约30%, 准确率下降小于3%。</p></li>
</ul>
<p>如果你想设置boost模式, 可以将 <cite>boost_config_dict</cite> 设置为 <cite>boost.py</cite> 。</p>
</li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">nn</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">class</span> <span class="nc">Net</span><span class="p">(</span><span class="n">nn</span><span class="o">.</span><span class="n">Cell</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">num_class</span><span class="o">=</span><span class="mi">10</span><span class="p">,</span> <span class="n">num_channel</span><span class="o">=</span><span class="mi">1</span><span class="p">):</span>
<span class="gp">... </span>        <span class="nb">super</span><span class="p">(</span><span class="n">Net</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">conv1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="n">num_channel</span><span class="p">,</span> <span class="mi">6</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;valid&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">conv2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;valid&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">fc1</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Dense</span><span class="p">(</span><span class="mi">16</span><span class="o">*</span><span class="mi">5</span><span class="o">*</span><span class="mi">5</span><span class="p">,</span> <span class="mi">120</span><span class="p">,</span> <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">fc2</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Dense</span><span class="p">(</span><span class="mi">120</span><span class="p">,</span> <span class="mi">84</span><span class="p">,</span> <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Dense</span><span class="p">(</span><span class="mi">84</span><span class="p">,</span> <span class="n">num_class</span><span class="p">,</span> <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">relu</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">ReLU</span><span class="p">()</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">max_pool2d</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">MaxPool2d</span><span class="p">(</span><span class="n">kernel_size</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">stride</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">... </span>        <span class="bp">self</span><span class="o">.</span><span class="n">flatten</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Flatten</span><span class="p">()</span>
<span class="gp">...</span>
<span class="gp">... </span>    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_pool2d</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv1</span><span class="p">(</span><span class="n">x</span><span class="p">)))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_pool2d</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">conv2</span><span class="p">(</span><span class="n">x</span><span class="p">)))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">flatten</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc1</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">relu</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">fc2</span><span class="p">(</span><span class="n">x</span><span class="p">))</span>
<span class="gp">... </span>        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fc3</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
<span class="gp">... </span>        <span class="k">return</span> <span class="n">x</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">params</span><span class="o">=</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># For details about how to build the dataset, please refer to the tutorial</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># document on the official website.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="n">dataset</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="mindspore.Model.build">
<code class="sig-name descname">build</code><span class="sig-paren">(</span><em class="sig-param">train_dataset=None</em>, <em class="sig-param">valid_dataset=None</em>, <em class="sig-param">sink_size=-1</em>, <em class="sig-param">epoch=1</em>, <em class="sig-param">jit_config=None</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model.build" title="Permalink to this definition">¶</a></dt>
<dd><p>数据下沉模式下构建计算图和数据图。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>如果预先调用该接口构建计算图，那么 <cite>Model.train</cite> 会直接执行计算图。预构建计算图目前仅支持GRAPH_MODE模式和Ascend处理器，仅支持数据下沉模式。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>train_dataset</strong> (Dataset) – 一个训练集迭代器。如果定义了 <cite>train_dataset</cite> ，将会构建训练计算图。默认值：None。</p></li>
<li><p><strong>valid_dataset</strong> (Dataset) - 一个验证集迭代器。如果定义了 <cite>valid_dataset</cite> ，将会构建验证计算图，此时 <cite>Model</cite> 中的 <cite>metrics</cite> 不能为None。默认值：None。</p></li>
<li><p><strong>sink_size</strong> (int) - 控制每次数据下沉的数据量。默认值：-1。</p></li>
<li><p><strong>epoch</strong> (int) - 控制训练轮次。默认值：1。</p></li>
<li><p><strong>jit_config</strong> (Union[str, dict]) – 控制jit设置。默认情况下，如果设置为None，计算图会按默认方式编译。用户可以使用字典自定义编译配置。 例如，可以设置 {‘jit_level’:’o0’} 来控制 jit 级别。支持控制的数据如下所示。 默认值：None。</p>
<ul>
<li><p>jit_level (string): 控制计算图编译优化级别。可选项: o0/o1。默认值: o1。如果设置为o0，则计算图编译将会传入类似于图阶段的组合。</p></li>
</ul>
</li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">nn</span><span class="p">,</span> <span class="n">FixedLossScaleManager</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># For details about how to build the dataset, please refer to the tutorial</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># document on the official website.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_scale_manager</span> <span class="o">=</span> <span class="n">FixedLossScaleManager</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">params</span><span class="o">=</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">loss_scale_manager</span><span class="o">=</span><span class="n">loss_scale_manager</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">build</span><span class="p">(</span><span class="n">dataset</span><span class="p">,</span> <span class="n">epoch</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="n">dataset</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.eval">
<code class="sig-name descname">eval</code><span class="sig-paren">(</span><em class="sig-param">valid_dataset</em>, <em class="sig-param">callbacks=None</em>, <em class="sig-param">dataset_sink_mode=True</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model.eval" title="Permalink to this definition">¶</a></dt>
<dd><p>模型评估接口。</p>
<p>使用PyNative模式或CPU处理器时，模型评估流程将以非下沉模式执行。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>如果 <cite>dataset_sink_mode</cite> 配置为True，数据将被发送到处理器中。此时数据集与模型绑定，数据集仅能在当前模型中使用。如果处理器是Ascend，数据特征将被逐一传输，每次数据传输的上限是256M。
该接口会构建并执行计算图，如果使用前先执行了 <cite>Model.build</cite> ，那么它会直接执行计算图而不构建。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>valid_dataset</strong> (Dataset) – 评估模型的数据集。</p></li>
<li><p><strong>callbacks</strong> (Optional[list(Callback), Callback]) - 评估过程中需要执行的回调对象或回调对象列表。默认值：None。</p></li>
<li><p><strong>dataset_sink_mode</strong> (bool) - 数据是否直接下沉至处理器进行处理。默认值：True。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>Dict，key是用户定义的评价指标名称，value是以推理模式运行的评估结果。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">nn</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># For details about how to build the dataset, please refer to the tutorial</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># document on the official website.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;acc&#39;</span><span class="p">})</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">acc</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">(</span><span class="n">dataset</span><span class="p">,</span> <span class="n">dataset_sink_mode</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.eval_network">
<em class="property">property </em><code class="sig-name descname">eval_network</code><a class="headerlink" href="#mindspore.Model.eval_network" title="Permalink to this definition">¶</a></dt>
<dd><p>获取该模型的评价网络。</p>
<p><strong>返回：</strong></p>
<p>评估网络实例。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.infer_predict_layout">
<code class="sig-name descname">infer_predict_layout</code><span class="sig-paren">(</span><em class="sig-param">*predict_data</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model.infer_predict_layout" title="Permalink to this definition">¶</a></dt>
<dd><p>在 <cite>AUTO_PARALLEL</cite> 或 <cite>SEMI_AUTO_PARALLEL</cite> 模式下为预测网络生成参数layout，数据可以是单个或多个张量。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>同一批次数据应放在一个张量中。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>predict_data</strong> (Tensor) – 预测样本，数据可以是单个张量、张量列表或张量元组。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>Dict，用于加载分布式checkpoint的参数layout字典。它总是作为 <cite>load_distributed_checkpoint()</cite> 函数的一个入参。</p>
<p><strong>异常：</strong></p>
<ul class="simple">
<li><p><strong>RuntimeError</strong> – 非图模式（GRAPH_MODE）将会抛出该异常。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="c1"># This example should be run with multiple devices. Refer to the tutorial &gt; Distributed Training on</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># mindspore.cn.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore</span> <span class="k">as</span> <span class="nn">ms</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">context</span><span class="p">,</span> <span class="n">Tensor</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.context</span> <span class="kn">import</span> <span class="n">ParallelMode</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.communication</span> <span class="kn">import</span> <span class="n">init</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">context</span><span class="o">.</span><span class="n">set_context</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="n">context</span><span class="o">.</span><span class="n">GRAPH_MODE</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">init</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">context</span><span class="o">.</span><span class="n">set_auto_parallel_context</span><span class="p">(</span><span class="n">full_batch</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">parallel_mode</span><span class="o">=</span><span class="n">ParallelMode</span><span class="o">.</span><span class="n">SEMI_AUTO_PARALLEL</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">input_data</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">255</span><span class="p">,</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">]),</span> <span class="n">ms</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">Net</span><span class="p">())</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">predict_map</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">infer_predict_layout</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.infer_train_layout">
<code class="sig-name descname">infer_train_layout</code><span class="sig-paren">(</span><em class="sig-param">train_dataset</em>, <em class="sig-param">dataset_sink_mode=True</em>, <em class="sig-param">sink_size=-1</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model.infer_train_layout" title="Permalink to this definition">¶</a></dt>
<dd><p>在 <cite>AUTO_PARALLEL</cite> 或 <cite>SEMI_AUTO_PARALLEL</cite> 模式下为训练网络生成参数layout，当前仅支持在数据下沉模式下使用。</p>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>这是一个实验性的原型，可能会被改变或删除。</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>这是一个预编译函数。参数必须与Model.train()函数相同。</p>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>train_dataset</strong> (Dataset) – 一个训练数据集迭代器。如果没有损失函数（loss_fn），返回一个包含多个数据的元组（data1, data2, data3, …）并传递给网络。否则，返回一个元组（data, label），数据和标签将被分别传递给网络和损失函数。</p></li>
<li><p><strong>dataset_sink_mode</strong> (bool) – 决定是否以数据集下沉模式进行训练。PyNative模式下或处理器为CPU时，训练模型流程使用的是数据不下沉（non-sink）模式。默认值：True。</p></li>
<li><p><strong>sink_size</strong> (int) – 控制每次数据下沉的数据量，如果 <cite>sink_size</cite> =-1，则每一次epoch下沉完整数据集。如果 <cite>sink_size</cite> &gt;0，则每一次epoch下沉数据量为 <cite>sink_size</cite> 的数据集。如果 <cite>dataset_sink_mode</cite> 为False，则 <cite>sink_size</cite> 设置无效。默认值：-1。</p></li>
</ul>
<p><strong>返回：</strong></p>
<p>Dict，用于加载分布式checkpoint的参数layout字典。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="c1"># This example should be run with multiple devices. Refer to the tutorial &gt; Distributed Training on</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># mindspore.cn.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore</span> <span class="k">as</span> <span class="nn">ms</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">context</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">,</span> <span class="n">nn</span><span class="p">,</span> <span class="n">FixedLossScaleManager</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.context</span> <span class="kn">import</span> <span class="n">ParallelMode</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore.communication</span> <span class="kn">import</span> <span class="n">init</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">context</span><span class="o">.</span><span class="n">set_context</span><span class="p">(</span><span class="n">mode</span><span class="o">=</span><span class="n">context</span><span class="o">.</span><span class="n">GRAPH_MODE</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">init</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">context</span><span class="o">.</span><span class="n">set_auto_parallel_context</span><span class="p">(</span><span class="n">parallel_mode</span><span class="o">=</span><span class="n">ParallelMode</span><span class="o">.</span><span class="n">SEMI_AUTO_PARALLEL</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># For details about how to build the dataset, please refer to the tutorial</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># document on the official website.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_scale_manager</span> <span class="o">=</span> <span class="n">FixedLossScaleManager</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">params</span><span class="o">=</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">loss_scale_manager</span><span class="o">=</span><span class="n">loss_scale_manager</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">layout_dict</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">infer_train_layout</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.predict">
<code class="sig-name descname">predict</code><span class="sig-paren">(</span><em class="sig-param">*predict_data</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model.predict" title="Permalink to this definition">¶</a></dt>
<dd><p>输入样本得到预测结果。</p>
<p><strong>参数：</strong></p>
<p><strong>predict_data</strong> (Tensor) – 预测样本，数据可以是单个张量、张量列表或张量元组。</p>
<p><strong>返回：</strong></p>
<p>返回预测结果，类型是Tensor或Tensor元组。</p>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">mindspore</span> <span class="k">as</span> <span class="nn">ms</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">Tensor</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">input_data</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">random</span><span class="o">.</span><span class="n">randint</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">255</span><span class="p">,</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">]),</span> <span class="n">ms</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">Net</span><span class="p">())</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">result</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.predict_network">
<em class="property">property </em><code class="sig-name descname">predict_network</code><a class="headerlink" href="#mindspore.Model.predict_network" title="Permalink to this definition">¶</a></dt>
<dd><p>获得该模型的预测网络。</p>
<p><strong>返回：</strong></p>
<p>预测网络实例。</p>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.train">
<code class="sig-name descname">train</code><span class="sig-paren">(</span><em class="sig-param">epoch</em>, <em class="sig-param">train_dataset</em>, <em class="sig-param">callbacks=None</em>, <em class="sig-param">dataset_sink_mode=True</em>, <em class="sig-param">sink_size=-1</em><span class="sig-paren">)</span><a class="headerlink" href="#mindspore.Model.train" title="Permalink to this definition">¶</a></dt>
<dd><p>模型训练接口。</p>
<p>使用PYNATIVE_MODE模式或CPU处理器时，模型训练流程将以非下沉模式执行。</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<ul class="simple">
<li><p>如果 <cite>dataset_sink_mode</cite> 配置为True，数据将被送到处理器中。如果处理器是Ascend，数据特征将被逐一传输，每次数据传输的上限是256M。</p></li>
<li><p>如果 <cite>dataset_sink_mode</cite> 配置为True，仅在每个epoch结束时调用Callback实例的step_end方法。</p></li>
<li><p>如果 <cite>dataset_sink_mode</cite> 配置为True，数据集仅能在当前模型中使用。</p></li>
<li><p>如果 <cite>sink_size</cite> 大于零，每次epoch可以无限次遍历数据集，直到遍历数据量等于 <cite>sink_size</cite> 为止。</p></li>
<li><p>每次epoch将从上一次遍历的最后位置继续开始遍历。该接口会构建并执行计算图，如果使用前先执行了 <cite>Model.build</cite> ，那么它会直接执行计算图而不构建。</p></li>
</ul>
</div>
<p><strong>参数：</strong></p>
<ul class="simple">
<li><p><strong>epoch</strong> (int) – 训练执行轮次。通常每个epoch都会使用全量数据集进行训练。当 <cite>dataset_sink_mode</cite> 设置为True且 <cite>sink_size</cite> 大于零时，则每个epoch训练次数为 <cite>sink_size</cite> 而不是数据集的总步数。</p></li>
<li><p><strong>train_dataset</strong> (Dataset) – 一个训练数据集迭代器。如果定义了 <cite>loss_fn</cite> ，则数据和标签会被分别传给 <cite>network</cite> 和 <cite>loss_fn</cite> ，此时数据集需要返回一个元组（data, label）。如果数据集中有多个数据或者标签，可以设置 <cite>loss_fn</cite> 为None，并在 <cite>network</cite> 中实现损失函数计算，此时数据集返回的所有数据组成的元组（data1, data2, data3, …）会传给 <cite>network</cite> 。</p></li>
<li><p><strong>callbacks</strong> (Optional[list[Callback], Callback]) – 训练过程中需要执行的回调对象或者回调对象列表。默认值：None。</p></li>
<li><p><strong>dataset_sink_mode</strong> (bool) – 数据是否直接下沉至处理器进行处理。使用PYNATIVE_MODE模式或CPU处理器时，模型训练流程将以非下沉模式执行。默认值：True。</p></li>
<li><p><strong>sink_size</strong> (int) – 控制每次数据下沉的数据量。<cite>dataset_sink_mode</cite> 为False时 <cite>sink_size</cite> 无效。如果sink_size=-1，则每一次epoch下沉完整数据集。如果sink_size&gt;0，则每一次epoch下沉数据量为sink_size的数据集。默认值：-1。</p></li>
</ul>
<p><strong>样例：</strong></p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">mindspore</span> <span class="kn">import</span> <span class="n">Model</span><span class="p">,</span> <span class="n">nn</span><span class="p">,</span> <span class="n">FixedLossScaleManager</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># For details about how to build the dataset, please refer to the tutorial</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># document on the official website.</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dataset</span> <span class="o">=</span> <span class="n">create_custom_dataset</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">net</span> <span class="o">=</span> <span class="n">Net</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">SoftmaxCrossEntropyWithLogits</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">loss_scale_manager</span> <span class="o">=</span> <span class="n">FixedLossScaleManager</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">optim</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">Momentum</span><span class="p">(</span><span class="n">params</span><span class="o">=</span><span class="n">net</span><span class="o">.</span><span class="n">trainable_params</span><span class="p">(),</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">net</span><span class="p">,</span> <span class="n">loss_fn</span><span class="o">=</span><span class="n">loss</span><span class="p">,</span> <span class="n">optimizer</span><span class="o">=</span><span class="n">optim</span><span class="p">,</span> <span class="n">metrics</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">loss_scale_manager</span><span class="o">=</span><span class="n">loss_scale_manager</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="n">dataset</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="method">
<dt id="mindspore.Model.train_network">
<em class="property">property </em><code class="sig-name descname">train_network</code><a class="headerlink" href="#mindspore.Model.train_network" title="Permalink to this definition">¶</a></dt>
<dd><p>获得该模型的训练网络。</p>
<p><strong>返回：</strong></p>
<p>训练网络实例。</p>
</dd></dl>

</dd></dl>

</div>


           </div>
           
          </div>
          <footer>
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
        <a href="mindspore.DatasetHelper.html" class="btn btn-neutral float-right" title="mindspore.DatasetHelper" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
        <a href="mindspore.get_seed.html" class="btn btn-neutral float-left" title="mindspore.get_seed" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
    </div>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>