

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="zh" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="zh" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Pruners &mdash; Optuna 1.4.0 文档</title>
  

  
  
    <link rel="shortcut icon" href="../_static/favicon.ico"/>
  
  
  

  
  <script type="text/javascript" src="../_static/js/modernizr.min.js"></script>
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/language_data.js"></script>
        <script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    

  
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/custom.css" type="text/css" />
    <link rel="index" title="索引" href="../genindex.html" />
    <link rel="search" title="搜索" href="../search.html" />
    <link rel="next" title="Samplers" href="samplers.html" />
    <link rel="prev" title="Visualization" href="multi_objective/visualization.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html" class="icon icon-home"> Optuna
          

          
          </a>

          
            
            
              <div class="version">
                1.4.0
              </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">目录</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../installation.html">安装</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorial/index.html">教程</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="index.html">API Reference</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="core.html">Core</a></li>
<li class="toctree-l2"><a class="reference internal" href="cli.html">Command Line Interface</a></li>
<li class="toctree-l2"><a class="reference internal" href="distributions.html">Distributions</a></li>
<li class="toctree-l2"><a class="reference internal" href="exceptions.html">Exceptions</a></li>
<li class="toctree-l2"><a class="reference internal" href="importance.html">Hyperparameter Importance</a></li>
<li class="toctree-l2"><a class="reference internal" href="integration.html">Integration</a></li>
<li class="toctree-l2"><a class="reference internal" href="logging.html">Logging</a></li>
<li class="toctree-l2"><a class="reference internal" href="multi_objective/index.html">Multi-objective</a></li>
<li class="toctree-l2 current"><a class="current reference internal" href="#">Pruners</a></li>
<li class="toctree-l2"><a class="reference internal" href="samplers.html">Samplers</a></li>
<li class="toctree-l2"><a class="reference internal" href="storages.html">Storages</a></li>
<li class="toctree-l2"><a class="reference internal" href="structs.html">Structs</a></li>
<li class="toctree-l2"><a class="reference internal" href="study.html">Study</a></li>
<li class="toctree-l2"><a class="reference internal" href="trial.html">Trial</a></li>
<li class="toctree-l2"><a class="reference internal" href="visualization.html">Visualization</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../faq.html">常见问题</a></li>
</ul>

            
          
        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">Optuna</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html">Docs</a> &raquo;</li>
        
          <li><a href="index.html">API Reference</a> &raquo;</li>
        
      <li>Pruners</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
            <a href="../_sources/reference/pruners.rst.txt" rel="nofollow"> View page source</a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <span class="target" id="module-optuna.pruners"></span><div class="section" id="pruners">
<h1>Pruners<a class="headerlink" href="#pruners" title="永久链接至标题">¶</a></h1>
<dl class="py class">
<dt id="optuna.pruners.BasePruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">BasePruner</code><a class="reference internal" href="../_modules/optuna/pruners/_base.html#BasePruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.BasePruner" title="永久链接至目标">¶</a></dt>
<dd><p>Pruner 基类</p>
<dl class="py method">
<dt id="optuna.pruners.BasePruner.prune">
<em class="property">abstract </em><code class="sig-name descname">prune</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">study</span></em>, <em class="sig-param"><span class="n">trial</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/optuna/pruners/_base.html#BasePruner.prune"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.BasePruner.prune" title="永久链接至目标">¶</a></dt>
<dd><p>根据报告的值判断是否应对该 trial 剪枝。</p>
<p>注意，该方法不应由库用户调用。相反，<a class="reference internal" href="trial.html#optuna.trial.Trial.report" title="optuna.trial.Trial.report"><code class="xref py py-func docutils literal notranslate"><span class="pre">optuna.trial.Trial.report()</span></code></a> 和 <a class="reference internal" href="trial.html#optuna.trial.Trial.should_prune" title="optuna.trial.Trial.should_prune"><code class="xref py py-func docutils literal notranslate"><span class="pre">optuna.trial.Trial.should_prune()</span></code></a> 提供了接口给用户以在目标函数中实施剪枝机制。</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>study</strong> -- 目标 study 的 study 对象。</p></li>
<li><p><strong>trial</strong> -- 目标 trial 的 FrozenTrial 对象。 修改此对象之前需先对其进行复制操作。</p></li>
</ul>
</dd>
<dt class="field-even">返回</dt>
<dd class="field-even"><p>一个布尔值，表示是否应对该试验进行剪枝。</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

<dl class="py class">
<dt id="optuna.pruners.MedianPruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">MedianPruner</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">n_startup_trials</span><span class="o">=</span><span class="default_value">5</span></em>, <em class="sig-param"><span class="n">n_warmup_steps</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">interval_steps</span><span class="o">=</span><span class="default_value">1</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/optuna/pruners/_median.html#MedianPruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.MedianPruner" title="永久链接至目标">¶</a></dt>
<dd><p>使用中值停止规则的 pruner.</p>
<p>如果该试验的最佳中间结果比同一步骤中先前试验的中间结果的中值差，则进行剪枝。</p>
<p class="rubric">示例</p>
<p>我们用中值停止规则最小化目标函数。</p>
<div class="highlight-python3 notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">sklearn.datasets</span> <span class="kn">import</span> <span class="n">load_iris</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">SGDClassifier</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>

<span class="kn">import</span> <span class="nn">optuna</span>

<span class="n">X</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">load_iris</span><span class="p">(</span><span class="n">return_X_y</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="n">classes</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">objective</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="n">alpha</span> <span class="o">=</span> <span class="n">trial</span><span class="o">.</span><span class="n">suggest_uniform</span><span class="p">(</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
    <span class="n">clf</span> <span class="o">=</span> <span class="n">SGDClassifier</span><span class="p">(</span><span class="n">alpha</span><span class="o">=</span><span class="n">alpha</span><span class="p">)</span>
    <span class="n">n_train_iter</span> <span class="o">=</span> <span class="mi">100</span>

    <span class="k">for</span> <span class="n">step</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_train_iter</span><span class="p">):</span>
        <span class="n">clf</span><span class="o">.</span><span class="n">partial_fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">classes</span><span class="o">=</span><span class="n">classes</span><span class="p">)</span>

        <span class="n">intermediate_value</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">intermediate_value</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">raise</span> <span class="n">optuna</span><span class="o">.</span><span class="n">TrialPruned</span><span class="p">()</span>

    <span class="k">return</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">optuna</span><span class="o">.</span><span class="n">create_study</span><span class="p">(</span><span class="n">direction</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
                            <span class="n">pruner</span><span class="o">=</span><span class="n">optuna</span><span class="o">.</span><span class="n">pruners</span><span class="o">.</span><span class="n">MedianPruner</span><span class="p">(</span><span class="n">n_startup_trials</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span>
                                                               <span class="n">n_warmup_steps</span><span class="o">=</span><span class="mi">30</span><span class="p">,</span>
                                                               <span class="n">interval_steps</span><span class="o">=</span><span class="mi">10</span><span class="p">))</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">20</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>n_startup_trials</strong> -- 剪枝将被禁用，直到在同一 study 中完成给定的 trial 次数为止。</p></li>
<li><p><strong>n_warmup_steps</strong> -- 在 trial 超过给定步骤数之前，将禁用剪枝功能。</p></li>
<li><p><strong>interval_steps</strong> -- 不同剪枝检查之间的间隔步骤，预热步骤不算在其中。 如果在剪枝检查时未报告任何值，则该特定检查将被推迟，直到报告了一个值。</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="py class">
<dt id="optuna.pruners.NopPruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">NopPruner</code><a class="reference internal" href="../_modules/optuna/pruners/_nop.html#NopPruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.NopPruner" title="永久链接至目标">¶</a></dt>
<dd><p>不修剪 trial 的 pruner.</p>
<p class="rubric">示例</p>
<div class="highlight-python3 notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">sklearn.datasets</span> <span class="kn">import</span> <span class="n">load_iris</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">SGDClassifier</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>

<span class="kn">import</span> <span class="nn">optuna</span>

<span class="n">X</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">load_iris</span><span class="p">(</span><span class="n">return_X_y</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="n">classes</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">objective</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="n">alpha</span> <span class="o">=</span> <span class="n">trial</span><span class="o">.</span><span class="n">suggest_uniform</span><span class="p">(</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
    <span class="n">clf</span> <span class="o">=</span> <span class="n">SGDClassifier</span><span class="p">(</span><span class="n">alpha</span><span class="o">=</span><span class="n">alpha</span><span class="p">)</span>
    <span class="n">n_train_iter</span> <span class="o">=</span> <span class="mi">100</span>

    <span class="k">for</span> <span class="n">step</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_train_iter</span><span class="p">):</span>
        <span class="n">clf</span><span class="o">.</span><span class="n">partial_fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">classes</span><span class="o">=</span><span class="n">classes</span><span class="p">)</span>

        <span class="n">intermediate_value</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">intermediate_value</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">assert</span> <span class="kc">False</span><span class="p">,</span> <span class="s2">&quot;should_prune() should always return False with this pruner.&quot;</span>
            <span class="k">raise</span> <span class="n">optuna</span><span class="o">.</span><span class="n">TrialPruned</span><span class="p">()</span>

    <span class="k">return</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">optuna</span><span class="o">.</span><span class="n">create_study</span><span class="p">(</span><span class="n">direction</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
                            <span class="n">pruner</span><span class="o">=</span><span class="n">optuna</span><span class="o">.</span><span class="n">pruners</span><span class="o">.</span><span class="n">NopPruner</span><span class="p">())</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">20</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="py class">
<dt id="optuna.pruners.PercentilePruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">PercentilePruner</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">percentile</span></em>, <em class="sig-param"><span class="n">n_startup_trials</span><span class="o">=</span><span class="default_value">5</span></em>, <em class="sig-param"><span class="n">n_warmup_steps</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">interval_steps</span><span class="o">=</span><span class="default_value">1</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/optuna/pruners/_percentile.html#PercentilePruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.PercentilePruner" title="永久链接至目标">¶</a></dt>
<dd><p>保留指定百分位 trial 的 pruner.</p>
<p>如果在同一步骤的 trial 中，最佳中间值位于最低百分位数，则进行剪枝。</p>
<p class="rubric">示例</p>
<div class="highlight-python3 notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">sklearn.datasets</span> <span class="kn">import</span> <span class="n">load_iris</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">SGDClassifier</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>

<span class="kn">import</span> <span class="nn">optuna</span>

<span class="n">X</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">load_iris</span><span class="p">(</span><span class="n">return_X_y</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="n">classes</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">objective</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="n">alpha</span> <span class="o">=</span> <span class="n">trial</span><span class="o">.</span><span class="n">suggest_uniform</span><span class="p">(</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
    <span class="n">clf</span> <span class="o">=</span> <span class="n">SGDClassifier</span><span class="p">(</span><span class="n">alpha</span><span class="o">=</span><span class="n">alpha</span><span class="p">)</span>
    <span class="n">n_train_iter</span> <span class="o">=</span> <span class="mi">100</span>

    <span class="k">for</span> <span class="n">step</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_train_iter</span><span class="p">):</span>
        <span class="n">clf</span><span class="o">.</span><span class="n">partial_fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">classes</span><span class="o">=</span><span class="n">classes</span><span class="p">)</span>

        <span class="n">intermediate_value</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">intermediate_value</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">raise</span> <span class="n">optuna</span><span class="o">.</span><span class="n">TrialPruned</span><span class="p">()</span>

    <span class="k">return</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">optuna</span><span class="o">.</span><span class="n">create_study</span><span class="p">(</span>
    <span class="n">direction</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
    <span class="n">pruner</span><span class="o">=</span><span class="n">optuna</span><span class="o">.</span><span class="n">pruners</span><span class="o">.</span><span class="n">PercentilePruner</span><span class="p">(</span><span class="mf">25.0</span><span class="p">,</span> <span class="n">n_startup_trials</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span>
                                           <span class="n">n_warmup_steps</span><span class="o">=</span><span class="mi">30</span><span class="p">,</span> <span class="n">interval_steps</span><span class="o">=</span><span class="mi">10</span><span class="p">))</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">20</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>percentile</strong> -- 百分位数必须介于 0 到 100 之间（例如，如果给定 25.0，保留第25个百分位数 trial 的顶部）。</p></li>
<li><p><strong>n_startup_trials</strong> -- 剪枝将被禁用，直到在同一 study 中完成给定的 trial 次数为止。</p></li>
<li><p><strong>n_warmup_steps</strong> -- 在 trial 超过给定步骤数之前，将禁用剪枝功能。</p></li>
<li><p><strong>interval_steps</strong> -- 不同剪枝检查之间的间隔步骤，预热步骤不算在其中。 如果在剪枝检查时未报告任何值，则该特定检查将被推迟，直到报告了一个值。该值最小为 1。</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="py class">
<dt id="optuna.pruners.SuccessiveHalvingPruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">SuccessiveHalvingPruner</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">min_resource</span><span class="o">=</span><span class="default_value">'auto'</span></em>, <em class="sig-param"><span class="n">reduction_factor</span><span class="o">=</span><span class="default_value">4</span></em>, <em class="sig-param"><span class="n">min_early_stopping_rate</span><span class="o">=</span><span class="default_value">0</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/optuna/pruners/_successive_halving.html#SuccessiveHalvingPruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.SuccessiveHalvingPruner" title="永久链接至目标">¶</a></dt>
<dd><p>使用异步连续减半算法的 pruner.</p>
<p><a class="reference external" href="https://arxiv.org/abs/1502.07943">Successive Halving</a> 是一种基于 bandit 的算法，用于识别多种配置中的最佳配置。该类实现了异步版本的 Successive Halving。详细描述请参考 <a class="reference external" href="http://arxiv.org/abs/1810.05934">Asynchronous Successive Halving</a>。</p>
<p>注意，该类并不关心最大资源的参数（文中以 <span class="math notranslate nohighlight">\(R\)</span> 来表示）。给每个 trial 分配的最大资源限制通常是在目标函数内部完成的。（例如 <a class="reference external" href="https://github.com/optuna/optuna/tree/c5777b3e/examples/pruning/simple.py#L31">simple.py</a> 中的 <code class="docutils literal notranslate"><span class="pre">step</span></code> 和 <code class="docutils literal notranslate"><span class="pre">chainer_integration.py</span></code> 中的 <code class="docutils literal notranslate"><span class="pre">EPOCH</span></code>。）</p>
<p class="rubric">示例</p>
<p>我们使用 <code class="docutils literal notranslate"><span class="pre">SuccessiveHalvingPruner</span></code> 来最小化目标函数。</p>
<div class="highlight-python3 notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">sklearn.datasets</span> <span class="kn">import</span> <span class="n">load_iris</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">SGDClassifier</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>

<span class="kn">import</span> <span class="nn">optuna</span>

<span class="n">X</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">load_iris</span><span class="p">(</span><span class="n">return_X_y</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="n">classes</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">objective</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="n">alpha</span> <span class="o">=</span> <span class="n">trial</span><span class="o">.</span><span class="n">suggest_uniform</span><span class="p">(</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
    <span class="n">clf</span> <span class="o">=</span> <span class="n">SGDClassifier</span><span class="p">(</span><span class="n">alpha</span><span class="o">=</span><span class="n">alpha</span><span class="p">)</span>
    <span class="n">n_train_iter</span> <span class="o">=</span> <span class="mi">100</span>

    <span class="k">for</span> <span class="n">step</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_train_iter</span><span class="p">):</span>
        <span class="n">clf</span><span class="o">.</span><span class="n">partial_fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">classes</span><span class="o">=</span><span class="n">classes</span><span class="p">)</span>

        <span class="n">intermediate_value</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">intermediate_value</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">raise</span> <span class="n">optuna</span><span class="o">.</span><span class="n">TrialPruned</span><span class="p">()</span>

    <span class="k">return</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">optuna</span><span class="o">.</span><span class="n">create_study</span><span class="p">(</span><span class="n">direction</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
                            <span class="n">pruner</span><span class="o">=</span><span class="n">optuna</span><span class="o">.</span><span class="n">pruners</span><span class="o">.</span><span class="n">SuccessiveHalvingPruner</span><span class="p">())</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">20</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>min_resource</strong> -- 用于指定分配给单个 trial 的最小资源 (在 <a class="reference external" href="http://arxiv.org/abs/1810.05934">paper</a> 中 该参数是  <span class="math notranslate nohighlight">\(r\)</span>).默认情况下，该参数是 'auto', 此时，其值是由一个启发式算法设定的，该算法会观察完成第一个 trial 所需要的步数。除非执行了 <span class="math notranslate nohighlight">\(\mathsf{min}\_\mathsf{resource} \times \mathsf{reduction}\_\mathsf{factor}^{ \mathsf{min}\_\mathsf{early}\_\mathsf{stopping}\_\mathsf{rate}}\)</span> 步 (也就是第一级的完成点), 否则该 trial 不会被剪枝。 如果一个 trial 完成了第一级，它不会跳入下一级，除非该 trial 的值 处于所有已抵达该点（否则就被剪枝了）的 trial <span class="math notranslate nohighlight">\({1 \over \mathsf{reduction}\_\mathsf{factor}}\)</span> 的顶部。如果该 trial 赢得了竞争，它就会一直运行直到下一个完成点到来 (也就是 <span class="math notranslate nohighlight">\(\mathsf{min}\_\mathsf{resource} \times \mathsf{reduction}\_\mathsf{factor}^{ (\mathsf{min}\_\mathsf{early}\_\mathsf{stopping}\_\mathsf{rate} + \mathsf{rung})}\)</span> 步) 并且重复上一个过程。 .. note::     如果每个 trial 的最后中间值的步骤不同的话，请     手动设定最小的可能步骤 <code class="docutils literal notranslate"><span class="pre">min_resource</span></code>.</p></li>
<li><p><strong>reduction_factor</strong> -- 用于设置可提升 trial 的缩减因子参数（在 <a class="reference external" href="http://arxiv.org/abs/1810.05934">论文</a> 中该参数是 <span class="math notranslate nohighlight">\(\eta\)</span> ）。在每一级的完成点，大约有 <span class="math notranslate nohighlight">\({1 \over \mathsf{reduction}\_\mathsf{factor}}\)</span> 的 trial会被提升。</p></li>
<li><p><strong>min_early_stopping_rate</strong> -- 用于确定最小提前终止率的参数。 (在 <a class="reference external" href="http://arxiv.org/abs/1810.05934">论文</a> 中它是 <span class="math notranslate nohighlight">\(s\)</span>).</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="py class">
<dt id="optuna.pruners.HyperbandPruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">HyperbandPruner</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">min_resource</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.8)">int</a></span> <span class="o">=</span> <span class="default_value">1</span></em>, <em class="sig-param"><span class="n">max_resource</span><span class="p">:</span> <span class="n">Union<span class="p">[</span><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(在 Python v3.8)">str</a><span class="p">, </span><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.8)">int</a><span class="p">]</span></span> <span class="o">=</span> <span class="default_value">'auto'</span></em>, <em class="sig-param"><span class="n">reduction_factor</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.8)">int</a></span> <span class="o">=</span> <span class="default_value">3</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/optuna/pruners/_hyperband.html#HyperbandPruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.HyperbandPruner" title="永久链接至目标">¶</a></dt>
<dd><p>使用 hyperband 的 pruner.</p>
<p>由于 SuccessiveHalving (SHA) 需要配置的总数 <span class="math notranslate nohighlight">\(n\)</span> 作为它的超参数，对于一个有限的预算 <span class="math notranslate nohighlight">\(B\)</span> 来说，平均来讲所有的配置都有 <span class="math notranslate nohighlight">\(B \over n\)</span> 的资源。如你所见，这中间存在一个 <span class="math notranslate nohighlight">\(B\)</span>  和 <span class="math notranslate nohighlight">\(B \over n\)</span> 之间的权衡。通过对于固定的预算尝试不同的  <span class="math notranslate nohighlight">\(n\)</span>, <a class="reference external" href="http://www.jmlr.org/papers/volume18/16-558/16-558.pdf">Hyperband 论文</a> 研究了这种权衡。</p>
<div class="admonition note">
<p class="admonition-title">注解</p>
<ul class="simple">
<li><p>Hyperband 论文中用的是 <a class="reference internal" href="samplers.html#optuna.samplers.RandomSampler" title="optuna.samplers.RandomSampler"><code class="xref py py-class docutils literal notranslate"><span class="pre">RandomSampler</span></code></a> 的对应物</p></li>
<li><p>默认情况下，Optuna 使用 <a class="reference internal" href="samplers.html#optuna.samplers.TPESampler" title="optuna.samplers.TPESampler"><code class="xref py py-class docutils literal notranslate"><span class="pre">TPESampler</span></code></a>.</p></li>
<li><p><a class="reference external" href="https://github.com/optuna/optuna/pull/828#issuecomment-575457360">基准测试</a> 表明，<a class="reference internal" href="#optuna.pruners.HyperbandPruner" title="optuna.pruners.HyperbandPruner"><code class="xref py py-class docutils literal notranslate"><span class="pre">optuna.pruners.HyperbandPruner</span></code></a> 支持两种 sampler.</p></li>
</ul>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>如果你配合 <a class="reference internal" href="samplers.html#optuna.samplers.TPESampler" title="optuna.samplers.TPESampler"><code class="xref py py-class docutils literal notranslate"><span class="pre">TPESampler</span></code></a> 使用 <code class="docutils literal notranslate"><span class="pre">HyperbandPruner</span></code>, 我们推荐你考虑设置更大的 <code class="docutils literal notranslate"><span class="pre">n_trials</span></code> 或者 <code class="docutils literal notranslate"><span class="pre">timeout</span></code> 来充分利用 <a class="reference internal" href="samplers.html#optuna.samplers.TPESampler" title="optuna.samplers.TPESampler"><code class="xref py py-class docutils literal notranslate"><span class="pre">TPESampler</span></code></a> 的特性，因为，<a class="reference internal" href="samplers.html#optuna.samplers.TPESampler" title="optuna.samplers.TPESampler"><code class="xref py py-class docutils literal notranslate"><span class="pre">TPESampler</span></code></a> 在开始时使用了一些  (默认情况下是 <span class="math notranslate nohighlight">\(10\)</span>) <a class="reference internal" href="trial.html#optuna.trial.Trial" title="optuna.trial.Trial"><code class="xref py py-class docutils literal notranslate"><span class="pre">Trial</span></code></a>.</p>
<p>由于 Hyperband 运行多个 <a class="reference internal" href="#optuna.pruners.SuccessiveHalvingPruner" title="optuna.pruners.SuccessiveHalvingPruner"><code class="xref py py-class docutils literal notranslate"><span class="pre">SuccessiveHalvingPruner</span></code></a> 并且基于当前 <a class="reference internal" href="trial.html#optuna.trial.Trial" title="optuna.trial.Trial"><code class="xref py py-class docutils literal notranslate"><span class="pre">Trial</span></code></a> 的 bracket ID 来收集 trial, 每一个 bracket 需要观察起码 <span class="math notranslate nohighlight">\(10\)</span> 个 <a class="reference internal" href="trial.html#optuna.trial.Trial" title="optuna.trial.Trial"><code class="xref py py-class docutils literal notranslate"><span class="pre">Trial</span></code></a>好让 <a class="reference internal" href="samplers.html#optuna.samplers.TPESampler" title="optuna.samplers.TPESampler"><code class="xref py py-class docutils literal notranslate"><span class="pre">TPESampler</span></code></a> 来调整它的搜索空间。</p>
<p>因此，如果  <code class="docutils literal notranslate"><span class="pre">HyperbandPruner</span></code>  有  <span class="math notranslate nohighlight">\(4\)</span> 个 pruner 的话，在一开始时它就会用掉起码 <span class="math notranslate nohighlight">\(4 \times 10\)</span> 个 trial.</p>
</div>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>Hyperband 有好几个 <a class="reference internal" href="#optuna.pruners.SuccessiveHalvingPruner" title="optuna.pruners.SuccessiveHalvingPruner"><code class="xref py py-class docutils literal notranslate"><span class="pre">SuccessiveHalvingPruner</span></code></a>.每一个 <a class="reference internal" href="#optuna.pruners.SuccessiveHalvingPruner" title="optuna.pruners.SuccessiveHalvingPruner"><code class="xref py py-class docutils literal notranslate"><span class="pre">SuccessiveHalvingPruner</span></code></a> 都是原文中的 &quot;bracket&quot;. bracket 的数目是控制 Hyperband 提前终止行为的一个重要因子，它是由 <code class="docutils literal notranslate"><span class="pre">min_resource</span></code>, <code class="docutils literal notranslate"><span class="pre">max_resource</span></code> 和 <code class="docutils literal notranslate"><span class="pre">reduction_factor</span></code> 共同确定，形式是 <cite>The number of brackets = floor(log_{reduction_factor}(max_resource / min_resource)) + 1</cite>.请设置 <code class="docutils literal notranslate"><span class="pre">reduction_factor</span></code> 以让 bracket 的数目不要太大 (一般情况下是 4 ~ 6 )。更多细节请参考 <a class="reference external" href="http://www.jmlr.org/papers/volume18/16-558/16-558.pdf">原始论文</a> 中的 Section 3.6.</p>
</div>
<p class="rubric">示例</p>
<p>我们使用 Hyperband 剪枝算法来最小化目标函数。</p>
<div class="highlight-python3 notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">sklearn.datasets</span> <span class="kn">import</span> <span class="n">load_iris</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">SGDClassifier</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>

<span class="kn">import</span> <span class="nn">optuna</span>

<span class="n">X</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">load_iris</span><span class="p">(</span><span class="n">return_X_y</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="n">classes</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="n">y</span><span class="p">)</span>
<span class="n">n_train_iter</span> <span class="o">=</span> <span class="mi">100</span>

<span class="k">def</span> <span class="nf">objective</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="n">alpha</span> <span class="o">=</span> <span class="n">trial</span><span class="o">.</span><span class="n">suggest_uniform</span><span class="p">(</span><span class="s1">&#39;alpha&#39;</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
    <span class="n">clf</span> <span class="o">=</span> <span class="n">SGDClassifier</span><span class="p">(</span><span class="n">alpha</span><span class="o">=</span><span class="n">alpha</span><span class="p">)</span>

    <span class="k">for</span> <span class="n">step</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_train_iter</span><span class="p">):</span>
        <span class="n">clf</span><span class="o">.</span><span class="n">partial_fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">classes</span><span class="o">=</span><span class="n">classes</span><span class="p">)</span>

        <span class="n">intermediate_value</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">intermediate_value</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">raise</span> <span class="n">optuna</span><span class="o">.</span><span class="n">TrialPruned</span><span class="p">()</span>

    <span class="k">return</span> <span class="n">clf</span><span class="o">.</span><span class="n">score</span><span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">optuna</span><span class="o">.</span><span class="n">create_study</span><span class="p">(</span>
    <span class="n">direction</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
    <span class="n">pruner</span><span class="o">=</span><span class="n">optuna</span><span class="o">.</span><span class="n">pruners</span><span class="o">.</span><span class="n">HyperbandPruner</span><span class="p">(</span>
        <span class="n">min_resource</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
        <span class="n">max_resource</span><span class="o">=</span><span class="n">n_train_iter</span><span class="p">,</span>
        <span class="n">reduction_factor</span><span class="o">=</span><span class="mi">3</span>
    <span class="p">)</span>
<span class="p">)</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">20</span><span class="p">)</span>
</pre></div>
</div>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>min_resource</strong> -- 用于指定分配给单个 trial 的最小资源 (在 原文中 该参数是  <span class="math notranslate nohighlight">\(r\)</span>).一个更小的 <span class="math notranslate nohighlight">\(r\)</span> 会更快返回结果，但是 一个更大的 <span class="math notranslate nohighlight">\(r\)</span> 能保证更佳的不同配置之间的判定。具体细节见 <a class="reference internal" href="#optuna.pruners.SuccessiveHalvingPruner" title="optuna.pruners.SuccessiveHalvingPruner"><code class="xref py py-class docutils literal notranslate"><span class="pre">SuccessiveHalvingPruner</span></code></a>.</p></li>
<li><p><strong>max_resource</strong> -- 用于指定分配给一个 trial 的最大资源参数。原文中的 <span class="math notranslate nohighlight">\(R\)</span> 对应着 <code class="docutils literal notranslate"><span class="pre">max_resource</span> <span class="pre">/</span> <span class="pre">min_resource</span></code>. 该值代表着且应符合最大迭代步数（也就是一个神经网络的 epoch 数）。当该参数是 &quot;auto&quot; 时，最大资源是根据已经完成的 trial 来预估的。  .. note::     设置成 &quot;auto&quot; 时，最大资源将是头一个（如果在并行优化的情况下，就是头几个中的一个）已完成的 trial 的 <a class="reference internal" href="trial.html#optuna.trial.Trial.report" title="optuna.trial.Trial.report"><code class="xref py py-meth docutils literal notranslate"><span class="pre">report()</span></code></a> 报告的最大步数。如果每个 trial 的最后中间值的步骤不同的话，请     手动设定最大的可能步骤 <code class="docutils literal notranslate"><span class="pre">max_resource</span></code>.</p></li>
<li><p><strong>reduction_factor</strong> -- A parameter for specifying reduction factor of promotable trials noted as
<span class="math notranslate nohighlight">\(\eta\)</span> in the paper.
See the details for <a class="reference internal" href="#optuna.pruners.SuccessiveHalvingPruner" title="optuna.pruners.SuccessiveHalvingPruner"><code class="xref py py-class docutils literal notranslate"><span class="pre">SuccessiveHalvingPruner</span></code></a>.</p></li>
</ul>
</dd>
</dl>
<div class="admonition note">
<p class="admonition-title">注解</p>
<p>在 v1.1.0 版本中作为一个试验特性被加入的，该接口可能在新版中在没有提前告知的情况下改变。参见 <a class="reference external" href="https://github.com/optuna/optuna/releases/tag/v1.1.0">https://github.com/optuna/optuna/releases/tag/v1.1.0</a>.</p>
</div>
</dd></dl>

<dl class="py class">
<dt id="optuna.pruners.ThresholdPruner">
<em class="property">class </em><code class="sig-prename descclassname">optuna.pruners.</code><code class="sig-name descname">ThresholdPruner</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">lower</span><span class="p">:</span> <span class="n">Optional<span class="p">[</span><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.8)">float</a><span class="p">]</span></span> <span class="o">=</span> <span class="default_value">None</span></em>, <em class="sig-param"><span class="n">upper</span><span class="p">:</span> <span class="n">Optional<span class="p">[</span><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(在 Python v3.8)">float</a><span class="p">]</span></span> <span class="o">=</span> <span class="default_value">None</span></em>, <em class="sig-param"><span class="n">n_warmup_steps</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.8)">int</a></span> <span class="o">=</span> <span class="default_value">0</span></em>, <em class="sig-param"><span class="n">interval_steps</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(在 Python v3.8)">int</a></span> <span class="o">=</span> <span class="default_value">1</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/optuna/pruners/_threshold.html#ThresholdPruner"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#optuna.pruners.ThresholdPruner" title="永久链接至目标">¶</a></dt>
<dd><p>用于检测 trial 的无关度量的 pruner.</p>
<p>如果一个度量超过了上限，低于下限或者变成了 <code class="docutils literal notranslate"><span class="pre">nan</span></code>, 就剪枝。</p>
<p class="rubric">示例</p>
<div class="highlight-python3 notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">optuna</span> <span class="kn">import</span> <span class="n">create_study</span>
<span class="kn">from</span> <span class="nn">optuna.pruners</span> <span class="kn">import</span> <span class="n">ThresholdPruner</span>
<span class="kn">from</span> <span class="nn">optuna</span> <span class="kn">import</span> <span class="n">TrialPruned</span>

<span class="k">def</span> <span class="nf">objective_for_upper</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="k">for</span> <span class="n">step</span><span class="p">,</span> <span class="n">y</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">ys_for_upper</span><span class="p">):</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">y</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">raise</span> <span class="n">TrialPruned</span><span class="p">()</span>
    <span class="k">return</span> <span class="n">ys_for_upper</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>


<span class="k">def</span> <span class="nf">objective_for_lower</span><span class="p">(</span><span class="n">trial</span><span class="p">):</span>
    <span class="k">for</span> <span class="n">step</span><span class="p">,</span> <span class="n">y</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">ys_for_lower</span><span class="p">):</span>
        <span class="n">trial</span><span class="o">.</span><span class="n">report</span><span class="p">(</span><span class="n">y</span><span class="p">,</span> <span class="n">step</span><span class="p">)</span>

        <span class="k">if</span> <span class="n">trial</span><span class="o">.</span><span class="n">should_prune</span><span class="p">():</span>
            <span class="k">raise</span> <span class="n">TrialPruned</span><span class="p">()</span>
    <span class="k">return</span> <span class="n">ys_for_lower</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>


<span class="n">ys_for_upper</span> <span class="o">=</span> <span class="p">[</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">0.1</span><span class="p">,</span> <span class="mf">0.2</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">,</span> <span class="mf">1.2</span><span class="p">]</span>
<span class="n">ys_for_lower</span> <span class="o">=</span> <span class="p">[</span><span class="mf">100.0</span><span class="p">,</span> <span class="mf">90.0</span><span class="p">,</span> <span class="mf">0.1</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">]</span>
<span class="n">n_trial_step</span> <span class="o">=</span> <span class="mi">5</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">create_study</span><span class="p">(</span><span class="n">pruner</span><span class="o">=</span><span class="n">ThresholdPruner</span><span class="p">(</span><span class="n">upper</span><span class="o">=</span><span class="mf">1.0</span><span class="p">))</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective_for_upper</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>

<span class="n">study</span> <span class="o">=</span> <span class="n">create_study</span><span class="p">(</span><span class="n">pruner</span><span class="o">=</span><span class="n">ThresholdPruner</span><span class="p">(</span><span class="n">lower</span><span class="o">=</span><span class="mf">0.0</span><span class="p">))</span>
<span class="n">study</span><span class="o">.</span><span class="n">optimize</span><span class="p">(</span><span class="n">objective_for_lower</span><span class="p">,</span> <span class="n">n_trials</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
</pre></div>
</div>
<dl class="simple">
<dt>Args</dt><dd><dl class="simple">
<dt>lower:</dt><dd><p>一个确定 pruner 是否需要剪枝的最小值。如果某个中间值小于此值，就剪枝。</p>
</dd>
<dt>upper:</dt><dd><p>一个确定 pruner 是否需要剪枝的最大值。如果某个中间值大于此值，就剪枝。</p>
</dd>
<dt>n_warmup_steps:</dt><dd><p>在 trial 超过给定步骤数之前，将禁用剪枝功能。</p>
</dd>
<dt>interval_steps:</dt><dd><p>不同剪枝检查之间的间隔步骤，预热步骤不算在其中。 如果在剪枝检查时未报告任何值，则该特定检查将被推迟，直到报告了一个值。该值最小为1.</p>
</dd>
</dl>
</dd>
</dl>
</dd></dl>

</div>


           </div>
           
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="samplers.html" class="btn btn-neutral float-right" title="Samplers" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="multi_objective/visualization.html" class="btn btn-neutral float-left" title="Visualization" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2018, Optuna Contributors.

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
    <a href="../privacy.html">Privacy Policy</a>.
     


</footer>

        </div>
      </div>

    </section>

  </div>
  


  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
    <!-- Theme Analytics -->
    <script>
    (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
    })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

    ga('create', 'UA-55135190-8', 'auto');
    ga('send', 'pageview');
    </script>

    
   

</body>
</html>