<!DOCTYPE html>
<html lang="en-us">
  <head>

    <meta charset="utf-8">
<title>Cluster-level shard allocation and routing settings | Elasticsearch Guide [7.7] | Elastic</title>
<link rel="home" href="index.html" title="Elasticsearch Guide [7.7]">
<link rel="up" href="settings.html" title="Configuring Elasticsearch">
<link rel="prev" href="circuit-breaker.html" title="Circuit breaker settings">
<link rel="next" href="ccr-settings.html" title="Cross-cluster replication settings">
<meta name="DC.type" content="Learn/Docs/Elasticsearch/Reference/7.7">
<meta name="DC.subject" content="Elasticsearch">
<meta name="DC.identifier" content="7.7">
<meta name="robots" content="noindex,nofollow">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <script src="https://cdn.optimizely.com/js/18132920325.js"></script>
    <link rel="apple-touch-icon" sizes="57x57" href="/apple-icon-57x57.png">
    <link rel="apple-touch-icon" sizes="60x60" href="/apple-icon-60x60.png">
    <link rel="apple-touch-icon" sizes="72x72" href="/apple-icon-72x72.png">
    <link rel="apple-touch-icon" sizes="76x76" href="/apple-icon-76x76.png">
    <link rel="apple-touch-icon" sizes="114x114" href="/apple-icon-114x114.png">
    <link rel="apple-touch-icon" sizes="120x120" href="/apple-icon-120x120.png">
    <link rel="apple-touch-icon" sizes="144x144" href="/apple-icon-144x144.png">
    <link rel="apple-touch-icon" sizes="152x152" href="/apple-icon-152x152.png">
    <link rel="apple-touch-icon" sizes="180x180" href="/apple-icon-180x180.png">
    <link rel="icon" type="image/png" href="/favicon-32x32.png" sizes="32x32">
    <link rel="icon" type="image/png" href="/android-chrome-192x192.png" sizes="192x192">
    <link rel="icon" type="image/png" href="/favicon-96x96.png" sizes="96x96">
    <link rel="icon" type="image/png" href="/favicon-16x16.png" sizes="16x16">
    <link rel="manifest" href="/manifest.json">
    <meta name="apple-mobile-web-app-title" content="Elastic">
    <meta name="application-name" content="Elastic">
    <meta name="msapplication-TileColor" content="#ffffff">
    <meta name="msapplication-TileImage" content="/mstile-144x144.png">
    <meta name="theme-color" content="#ffffff">
    <meta name="naver-site-verification" content="936882c1853b701b3cef3721758d80535413dbfd">
    <meta name="yandex-verification" content="d8a47e95d0972434">
    <meta name="localized" content="true">
    <meta name="st:robots" content="follow,index">
    <meta property="og:image" content="https://www.elastic.co/static/images/elastic-logo-200.png">
    <link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
    <link rel="icon" href="/favicon.ico" type="image/x-icon">
    <link rel="apple-touch-icon-precomposed" sizes="64x64" href="/favicon_64x64_16bit.png">
    <link rel="apple-touch-icon-precomposed" sizes="32x32" href="/favicon_32x32.png">
    <link rel="apple-touch-icon-precomposed" sizes="16x16" href="/favicon_16x16.png">
    <!-- Give IE8 a fighting chance -->
    <!--[if lt IE 9]>
    <script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
    <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
    <![endif]-->
    <link rel="stylesheet" type="text/css" href="/guide/static/styles.css">
  </head>

  <!--© 2015-2021 Elasticsearch B.V. Copying, publishing and/or distributing without written permission is strictly prohibited.-->

  <body>
    <!-- Google Tag Manager -->
    <!-- Initialize the dataLayer queue before GTM loads; the noscript iframe is the
         JS-disabled fallback. URLs are explicit https (protocol-relative // is a
         legacy pattern and fails on file:// or non-http contexts). -->
    <script>dataLayer = [];</script><noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-58RLH5" height="0" width="0" style="display:none;visibility:hidden" title="Google Tag Manager"></iframe></noscript>
    <script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); })(window,document,'script','dataLayer','GTM-58RLH5');</script>
    <!-- End Google Tag Manager -->

    <!-- Global site tag (gtag.js) - Google Analytics -->
    <script async src="https://www.googletagmanager.com/gtag/js?id=UA-12395217-16"></script>
    <script>
      // Google Analytics (gtag.js) bootstrap: commands are queued on dataLayer
      // until the async gtag.js script (loaded above) processes them.
      window.dataLayer = window.dataLayer || [];
      // NOTE(review): gtag must push the `arguments` object itself (not a copy);
      // gtag.js depends on this exact form — do not rewrite with rest parameters.
      function gtag(){dataLayer.push(arguments);}
      gtag('js', new Date());
      gtag('config', 'UA-12395217-16');
    </script>

    <!--BEGIN QUALTRICS WEBSITE FEEDBACK SNIPPET-->
    <script type="text/javascript">
      // Qualtrics website-feedback sampler (vendor-minified — do not hand-edit).
      // g(e, h, f, g): e = sampling percentage, h = mode flag ("v" or "r"),
      // f = cookie name used to persist the sampling decision, g = URL of the
      // intercept engine script to inject when the visitor is sampled in.
      (function(){var g=function(e,h,f,g){
      // get(name): read a cookie value from document.cookie, or null if absent.
      this.get=function(a){for(var a=a+"=",c=document.cookie.split(";"),b=0,e=c.length;b<e;b++){for(var d=c[b];" "==d.charAt(0);)d=d.substring(1,d.length);if(0==d.indexOf(a))return d.substring(a.length,d.length)}return null};
      // set(name, value): write a cookie with a 6048E5 ms (7-day) expiry, path=/.
      this.set=function(a,c){var b="",b=new Date;b.setTime(b.getTime()+6048E5);b="; expires="+b.toGMTString();document.cookie=a+"="+c+b+"; path=/; "};
      // check(): decide whether this visitor is sampled in. Reads/updates the
      // cookie ("mode:percentage:counter"); 100% always passes; "v" mode rolls
      // Math.random() once and persists the result, "r" mode samples every
      // N-th request via the stored counter.
      this.check=function(){var a=this.get(f);if(a)a=a.split(":");else if(100!=e)"v"==h&&(e=Math.random()>=e/100?0:100),a=[h,e,0],this.set(f,a.join(":"));else return!0;var c=a[1];if(100==c)return!0;switch(a[0]){case "v":return!1;case "r":return c=a[2]%Math.floor(100/c),a[2]++,this.set(f,a.join(":")),!c}return!0};
      // go(): if sampled in, inject a <script> tag pointing at the intercept URL.
      this.go=function(){if(this.check()){var a=document.createElement("script");a.type="text/javascript";a.src=g;document.body&&document.body.appendChild(a)}};
      // start(): defer go() until window load (with attachEvent fallback for old IE).
      this.start=function(){var a=this;window.addEventListener?window.addEventListener("load",function(){a.go()},!1):window.attachEvent&&window.attachEvent("onload",function(){a.go()})}};
      // Run at 100% sampling in "r" mode; swallow any error so the snippet can
      // never break the page.
      try{(new g(100,"r","QSI_S_ZN_emkP0oSe9Qrn7kF","https://znemkp0ose9qrn7kf-elastic.siteintercept.qualtrics.com/WRSiteInterceptEngine/?Q_ZID=ZN_emkP0oSe9Qrn7kF")).start()}catch(i){}})();
    </script><div id="ZN_emkP0oSe9Qrn7kF"><!--DO NOT REMOVE-CONTENTS PLACED HERE--></div>
    <!--END WEBSITE FEEDBACK SNIPPET-->

    <div id="elastic-nav" style="display:none;"></div>
    <script src="https://www.elastic.co/elastic-nav.js"></script>

    <!-- Subnav -->
    <div>
      <div>
        <div class="tertiary-nav d-none d-md-block">
          <div class="container">
            <div class="p-t-b-15 d-flex justify-content-between nav-container">
              <div class="breadcrum-wrapper"><span><a href="/guide/" style="font-size: 14px; font-weight: 600; color: #000;">Docs</a></span></div>
            </div>
          </div>
        </div>
      </div>
    </div>

    <div class="main-container">
      <section id="content">
        <div class="content-wrapper">

          <section id="guide" lang="en">
            <div class="container">
              <div class="row">
                <div class="col-xs-12 col-sm-8 col-md-8 guide-section">
                  <!-- start body -->
                  <div class="page_header">
<strong>IMPORTANT</strong>: No additional bug fixes or documentation updates
will be released for this version. For the latest information, see the
<a href="../current/index.html">current release documentation</a>.
</div>
<div id="content">
<div class="breadcrumbs">
<span class="breadcrumb-link"><a href="index.html">Elasticsearch Guide [7.7]</a></span>
»
<span class="breadcrumb-link"><a href="setup.html">Set up Elasticsearch</a></span>
»
<span class="breadcrumb-link"><a href="settings.html">Configuring Elasticsearch</a></span>
»
<span class="breadcrumb-node">Cluster-level shard allocation and routing settings</span>
</div>
<div class="navheader">
<span class="prev">
<a href="circuit-breaker.html">« Circuit breaker settings</a>
</span>
<span class="next">
<a href="ccr-settings.html">Cross-cluster replication settings »</a>
</span>
</div>
<div class="section">
<div class="titlepage"><div><div>
<h2 class="title">
<a id="modules-cluster"></a>Cluster-level shard allocation and routing settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster.asciidoc">edit</a>
</h2>
</div></div></div>
<p><em>Shard allocation</em> is the process of allocating shards to nodes. This can
happen during initial recovery, replica allocation, rebalancing, or
when nodes are added or removed.</p>
<p>One of the main roles of the master is to decide which shards to allocate to
which nodes, and when to move shards between nodes in order to rebalance the
cluster.</p>
<p>There are a number of settings available to control the shard allocation process:</p>
<div class="ulist itemizedlist">
<ul class="itemizedlist">
<li class="listitem">
<a class="xref" href="modules-cluster.html#cluster-shard-allocation-settings" title="Cluster-level shard allocation settings">Cluster-level shard allocation settings</a> control allocation and
rebalancing operations.
</li>
<li class="listitem">
<a class="xref" href="modules-cluster.html#disk-based-shard-allocation" title="Disk-based shard allocation settings">Disk-based shard allocation settings</a> explains how Elasticsearch takes available
disk space into account, and the related settings.
</li>
<li class="listitem">
<a class="xref" href="modules-cluster.html#shard-allocation-awareness" title="Shard allocation awareness">Shard allocation awareness</a> and <a class="xref" href="modules-cluster.html#forced-awareness" title="Forced awareness">Forced awareness</a> control how shards
can be distributed across different racks or availability zones.
</li>
<li class="listitem">
<a class="xref" href="modules-cluster.html#cluster-shard-allocation-filtering" title="Cluster-level shard allocation filtering">Cluster-level shard allocation filtering</a> allows certain nodes or groups of
nodes to be excluded from allocation so that they can be decommissioned.
</li>
</ul>
</div>
<p>Besides these, there are a few other <a class="xref" href="modules-cluster.html#misc-cluster-settings" title="Miscellaneous cluster settings">miscellaneous cluster-level settings</a>.</p>
<p>All of these settings are <em>dynamic</em> and can be
updated on a live cluster with the
<a class="xref" href="cluster-update-settings.html" title="Cluster update settings API">cluster-update-settings</a> API.</p>
<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="cluster-shard-allocation-settings"></a>Cluster-level shard allocation settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/shards_allocation.asciidoc">edit</a>
</h3>
</div></div></div>
<p>The following <em>dynamic</em> settings may be used to control shard allocation and recovery:</p>
<div class="variablelist">
<a id="cluster-routing-allocation-enable"></a>
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.enable</code>
</span>
</dt>
<dd>
<p>Enable or disable allocation for specific kinds of shards:</p>
<div class="ulist itemizedlist">
<ul class="itemizedlist">
<li class="listitem">
<code class="literal">all</code> -             (default) Allows shard allocation for all kinds of shards.
</li>
<li class="listitem">
<code class="literal">primaries</code> -       Allows shard allocation only for primary shards.
</li>
<li class="listitem">
<code class="literal">new_primaries</code> -   Allows shard allocation only for primary shards for new indices.
</li>
<li class="listitem">
<code class="literal">none</code> -            No shard allocations of any kind are allowed for any indices.
</li>
</ul>
</div>
<p>This setting does not affect the recovery of local primary shards when
restarting a node.  A restarted node that has a copy of an unassigned primary
shard will recover that primary immediately, assuming that its allocation id matches
one of the active allocation ids in the cluster state.</p>
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.node_concurrent_incoming_recoveries</code>
</span>
</dt>
<dd>
How many concurrent incoming shard recoveries are allowed to happen on a node. Incoming recoveries are the recoveries
where the target shard (most likely the replica unless a shard is relocating) is allocated on the node. Defaults to <code class="literal">2</code>.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.node_concurrent_outgoing_recoveries</code>
</span>
</dt>
<dd>
How many concurrent outgoing shard recoveries are allowed to happen on a node. Outgoing recoveries are the recoveries
where the source shard (most likely the primary unless a shard is relocating) is allocated on the node. Defaults to <code class="literal">2</code>.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.node_concurrent_recoveries</code>
</span>
</dt>
<dd>
A shortcut to set both <code class="literal">cluster.routing.allocation.node_concurrent_incoming_recoveries</code> and
<code class="literal">cluster.routing.allocation.node_concurrent_outgoing_recoveries</code>.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.node_initial_primaries_recoveries</code>
</span>
</dt>
<dd>
While the recovery of replicas happens over the network, the recovery of
an unassigned primary after node restart uses data from the local disk.
These should be fast so more initial primary recoveries can happen in
parallel on the same node.  Defaults to <code class="literal">4</code>.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.same_shard.host</code>
</span>
</dt>
<dd>
Enables a check that prevents allocation of multiple instances of
the same shard on a single host, based on host name and host address.
Defaults to <code class="literal">false</code>, meaning that no check is performed by default. This
setting only applies if multiple nodes are started on the same machine.
</dd>
</dl>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="shards-rebalancing-settings"></a>Shard rebalancing settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/shards_allocation.asciidoc">edit</a>
</h3>
</div></div></div>
<p>The following <em>dynamic</em> settings may be used to control the rebalancing of
shards across the cluster:</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.routing.rebalance.enable</code>
</span>
</dt>
<dd>
<p>Enable or disable rebalancing for specific kinds of shards:</p>
<div class="ulist itemizedlist">
<ul class="itemizedlist">
<li class="listitem">
<code class="literal">all</code> -         (default) Allows shard balancing for all kinds of shards.
</li>
<li class="listitem">
<code class="literal">primaries</code> -   Allows shard balancing only for primary shards.
</li>
<li class="listitem">
<code class="literal">replicas</code> -    Allows shard balancing only for replica shards.
</li>
<li class="listitem">
<code class="literal">none</code> -        No shard balancing of any kind is allowed for any indices.
</li>
</ul>
</div>
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.allow_rebalance</code>
</span>
</dt>
<dd>
<p>Specify when shard rebalancing is allowed:</p>
<div class="ulist itemizedlist">
<ul class="itemizedlist">
<li class="listitem">
<code class="literal">always</code> -                    Always allow rebalancing.
</li>
<li class="listitem">
<code class="literal">indices_primaries_active</code> -  Only when all primaries in the cluster are allocated.
</li>
<li class="listitem">
<code class="literal">indices_all_active</code> -        (default) Only when all shards (primaries and replicas) in the cluster are allocated.
</li>
</ul>
</div>
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.cluster_concurrent_rebalance</code>
</span>
</dt>
<dd>
Controls how many concurrent shard rebalances are
allowed cluster-wide. Defaults to <code class="literal">2</code>. Note that this setting
only controls the number of concurrent shard relocations due
to imbalances in the cluster. This setting does not limit shard
relocations due to <a class="xref" href="modules-cluster.html#cluster-shard-allocation-filtering" title="Cluster-level shard allocation filtering">allocation
filtering</a> or <a class="xref" href="modules-cluster.html#forced-awareness" title="Forced awareness">forced awareness</a>.
</dd>
</dl>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="shards-rebalancing-heuristics"></a>Shard balancing heuristics settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/shards_allocation.asciidoc">edit</a>
</h3>
</div></div></div>
<p>The following settings are used together to determine where to place each
shard.  The cluster is balanced when no allowed rebalancing operation can bring the weight
of any node closer to the weight of any other node by more than the <code class="literal">balance.threshold</code>.</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.balance.shard</code>
</span>
</dt>
<dd>
Defines the weight factor for the total number of shards allocated on a node
(float). Defaults to <code class="literal">0.45f</code>.  Raising this raises the tendency to
equalize the number of shards across all nodes in the cluster.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.balance.index</code>
</span>
</dt>
<dd>
Defines the weight factor for the number of shards per index allocated
 on a specific node (float). Defaults to <code class="literal">0.55f</code>.  Raising this raises the
 tendency to equalize the number of shards per index across all nodes in
 the cluster.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.balance.threshold</code>
</span>
</dt>
<dd>
Minimal optimization value of operations that should be performed (non-negative
 float). Defaults to <code class="literal">1.0f</code>.  Raising this will cause the cluster
 to be less aggressive about optimizing the shard balance.
</dd>
</dl>
</div>
<div class="note admon">
<div class="icon"></div>
<div class="admon_content">
<p>Regardless of the result of the balancing algorithm, rebalancing might
not be allowed due to forced awareness or allocation filtering.</p>
</div>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="disk-based-shard-allocation"></a>Disk-based shard allocation settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/disk_allocator.asciidoc">edit</a>
</h3>
</div></div></div>
<p>Elasticsearch considers the available disk space on a node before deciding
whether to allocate new shards to that node or to actively relocate shards away
from that node.</p>
<p>Below are the settings that can be configured in the <code class="literal">elasticsearch.yml</code> config
file or updated dynamically on a live cluster with the
<a class="xref" href="cluster-update-settings.html" title="Cluster update settings API">cluster-update-settings</a> API:</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.disk.threshold_enabled</code>
</span>
</dt>
<dd>
Defaults to <code class="literal">true</code>.  Set to <code class="literal">false</code> to disable the disk allocation decider.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.disk.watermark.low</code>
</span>
</dt>
<dd>
Controls the low watermark for disk usage. It defaults to <code class="literal">85%</code>, meaning
that Elasticsearch will not allocate shards to nodes that have more than
85% disk used. It can also be set to an absolute byte value (like <code class="literal">500mb</code>)
to prevent Elasticsearch from allocating shards if less than the specified
amount of space is available. This setting has no effect on the primary
shards of newly-created indices but will prevent their replicas from being allocated.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.disk.watermark.high</code>
</span>
</dt>
<dd>
Controls the high watermark. It defaults to <code class="literal">90%</code>, meaning that
Elasticsearch will attempt to relocate shards away from a node whose disk
usage is above 90%. It can also be set to an absolute byte value (similarly
to the low watermark) to relocate shards away from a node if it has less
than the specified amount of free space. This setting affects the
allocation of all shards, whether previously allocated or not.
</dd>
</dl>
</div>
<div class="variablelist">
<a id="cluster-routing-flood_stage"></a>
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.disk.watermark.flood_stage</code>
</span>
</dt>
<dd>
<p>Controls the flood stage watermark, which defaults to 95%. Elasticsearch enforces a read-only index block
(<code class="literal">index.blocks.read_only_allow_delete</code>) on every index that has one or more
shards allocated on the node, and that has at least one disk exceeding the flood
stage. This setting is a last resort to prevent nodes from running out of disk space.
The index block is automatically released when the disk utilization falls below
the high watermark.</p>
<div class="note admon">
<div class="icon"></div>
<div class="admon_content">
<p>You cannot mix the usage of percentage values and byte values within
these settings. Either all values are set to percentage values, or all are set to byte
values. This enforcement is so that Elasticsearch can validate that the settings are internally
consistent, ensuring that the low disk threshold is less than the high disk
threshold, and the high disk threshold is less than the flood stage
threshold.</p>
</div>
</div>
<p>An example of resetting the read-only index block on the <code class="literal">twitter</code> index:</p>
<div class="pre_wrapper lang-console">
<pre class="programlisting prettyprint lang-console">PUT /twitter/_settings
{
  "index.blocks.read_only_allow_delete": null
}</pre>
</div>
<div class="console_widget" data-snippet="snippets/19.console"></div>
</dd>
<dt>
<span class="term">
<code class="literal">cluster.info.update.interval</code>
</span>
</dt>
<dd>
How often Elasticsearch should check on disk usage for each node in the
cluster. Defaults to <code class="literal">30s</code>.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.disk.include_relocations</code>
</span>
</dt>
<dd>
<span class="Admonishment Admonishment--change">
[<span class="Admonishment-version u-mono u-strikethrough">7.5.0</span>]
<span class="Admonishment-detail">
Deprecated in 7.5.0. Future versions will always account for relocations.
</span>
</span>
Defaults to <code class="literal">true</code>, which means that Elasticsearch will take into account
shards that are currently being relocated to the target node when computing
a node’s disk usage. Taking relocating shards' sizes into account may,
however, mean that the disk usage for a node is incorrectly estimated on
the high side, since the relocation could be 90% complete and a recently
retrieved disk usage would include the total size of the relocating shard
as well as the space already used by the running relocation.
</dd>
</dl>
</div>
<div class="note admon">
<div class="icon"></div>
<div class="admon_content">
<p>Percentage values refer to used disk space, while byte values refer to
free disk space. This can be confusing, since it flips the meaning of high and
low. For example, it makes sense to set the low watermark to 10gb and the high
watermark to 5gb, but not the other way around.</p>
</div>
</div>
<p>An example of updating the low watermark to at least 100 gigabytes free, a high
watermark of at least 50 gigabytes free, and a flood stage watermark of 10
gigabytes free, and updating the information about the cluster every minute:</p>
<div class="pre_wrapper lang-console">
<pre class="programlisting prettyprint lang-console">PUT _cluster/settings
{
  "transient": {
    "cluster.routing.allocation.disk.watermark.low": "100gb",
    "cluster.routing.allocation.disk.watermark.high": "50gb",
    "cluster.routing.allocation.disk.watermark.flood_stage": "10gb",
    "cluster.info.update.interval": "1m"
  }
}</pre>
</div>
<div class="console_widget" data-snippet="snippets/20.console"></div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="shard-allocation-awareness"></a>Shard allocation awareness<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/allocation_awareness.asciidoc">edit</a>
</h3>
</div></div></div>
<p>You can use custom node attributes as <em>awareness attributes</em> to enable Elasticsearch
to take your physical hardware configuration into account when allocating shards.
If Elasticsearch knows which nodes are on the same physical server, in the same rack, or
in the same zone, it can distribute the primary shard and its replica shards to
minimise the risk of losing all shard copies in the event of a failure.</p>
<p>When shard allocation awareness is enabled with the
<code class="literal">cluster.routing.allocation.awareness.attributes</code> setting, shards are only
allocated to nodes that have values set for the specified awareness
attributes. If you use multiple awareness attributes, Elasticsearch considers
each attribute separately when allocating shards.</p>
<p>The allocation awareness settings can be configured in
<code class="literal">elasticsearch.yml</code> and updated dynamically with the
<a class="xref" href="cluster-update-settings.html" title="Cluster update settings API">cluster-update-settings</a> API.</p>
<p>By default Elasticsearch uses <a class="xref" href="search.html#search-adaptive-replica" title="Adaptive Replica Selection">adaptive replica selection</a>
to route search or GET requests. However, with the presence of allocation awareness
attributes Elasticsearch will prefer using shards in the same location (with the same
awareness attribute values) to process these requests. This behavior can be
disabled by specifying <code class="literal">export ES_JAVA_OPTS="$ES_JAVA_OPTS -Des.search.ignore_awareness_attributes=true"</code>
system property on every node that is part of the cluster.</p>
<div class="note admon">
<div class="icon"></div>
<div class="admon_content">
<p>The number of attribute values determines how many shard copies are
allocated in each location. If the number of nodes in each location is
unbalanced and there are a lot of replicas, replica shards might be left
unassigned.</p>
</div>
</div>
<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="enabling-awareness"></a>Enabling shard allocation awareness<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/allocation_awareness.asciidoc">edit</a>
</h4>
</div></div></div>
<p>To enable shard allocation awareness:</p>
<div class="olist orderedlist">
<ol class="orderedlist">
<li class="listitem">
<p>Specify the location of each node with a custom node attribute. For example,
if you want Elasticsearch to distribute shards across different racks, you might
set an awareness attribute called <code class="literal">rack_id</code> in each node’s <code class="literal">elasticsearch.yml</code>
config file.</p>
<div class="pre_wrapper lang-yaml">
<pre class="programlisting prettyprint lang-yaml">node.attr.rack_id: rack_one</pre>
</div>
<p>You can also set custom attributes when you start a node:</p>
<div class="pre_wrapper lang-sh">
<pre class="programlisting prettyprint lang-sh">`./bin/elasticsearch -Enode.attr.rack_id=rack_one`</pre>
</div>
</li>
<li class="listitem">
<p>Tell Elasticsearch to take one or more awareness attributes into account when
allocating shards by setting
<code class="literal">cluster.routing.allocation.awareness.attributes</code> in <span class="strong strong"><strong>every</strong></span> master-eligible
node’s <code class="literal">elasticsearch.yml</code> config file.</p>
<div class="pre_wrapper lang-yaml">
<pre class="programlisting prettyprint lang-yaml">cluster.routing.allocation.awareness.attributes: rack_id <a id="CO7-1"></a><i class="conum" data-value="1"></i></pre>
</div>
<div class="calloutlist">
<table border="0" summary="Callout list">
<tr>
<td align="left" valign="top" width="5%">
<p><a href="#CO7-1"><i class="conum" data-value="1"></i></a></p>
</td>
<td align="left" valign="top">
<p>Specify multiple attributes as a comma-separated list.</p>
</td>
</tr>
</table>
</div>
<p>You can also use the
<a class="xref" href="cluster-update-settings.html" title="Cluster update settings API">cluster-update-settings</a> API to set or update
a cluster’s awareness attributes.</p>
</li>
</ol>
</div>
<p>With this example configuration, if you start two nodes with
<code class="literal">node.attr.rack_id</code> set to <code class="literal">rack_one</code> and create an index with 5 primary
shards and 1 replica of each primary, all primaries and replicas are
allocated across the two nodes.</p>
<p>If you add two nodes with <code class="literal">node.attr.rack_id</code> set to <code class="literal">rack_two</code>,
Elasticsearch moves shards to the new nodes, ensuring (if possible)
that no two copies of the same shard are in the same rack.</p>
<p>If <code class="literal">rack_two</code> fails and takes down both its nodes, by default Elasticsearch
allocates the lost shard copies to nodes in <code class="literal">rack_one</code>. To prevent multiple
copies of a particular shard from being allocated in the same location, you can
enable forced awareness.</p>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="forced-awareness"></a>Forced awareness<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/allocation_awareness.asciidoc">edit</a>
</h4>
</div></div></div>
<p>By default, if one location fails, Elasticsearch assigns all of the missing
replica shards to the remaining locations. While you might have sufficient
resources across all locations to host your primary and replica shards, a single
location might be unable to host <span class="strong strong"><strong>ALL</strong></span> of the shards.</p>
<p>To prevent a single location from being overloaded in the event of a failure,
you can set <code class="literal">cluster.routing.allocation.awareness.force</code> so no replicas are
allocated until nodes are available in another location.</p>
<p>For example, if you have an awareness attribute called <code class="literal">zone</code> and configure nodes
in <code class="literal">zone1</code> and <code class="literal">zone2</code>, you can use forced awareness to prevent Elasticsearch
from allocating replicas if only one zone is available:</p>
<div class="pre_wrapper lang-yaml">
<pre class="programlisting prettyprint lang-yaml">cluster.routing.allocation.awareness.attributes: zone
cluster.routing.allocation.awareness.force.zone.values: zone1,zone2 <a id="CO8-1"></a><i class="conum" data-value="1"></i></pre>
</div>
<div class="calloutlist">
<table border="0" summary="Callout list">
<tr>
<td align="left" valign="top" width="5%">
<p><a href="#CO8-1"><i class="conum" data-value="1"></i></a></p>
</td>
<td align="left" valign="top">
<p>Specify all possible values for the awareness attribute.</p>
</td>
</tr>
</table>
</div>
<p>With this example configuration, if you start two nodes with <code class="literal">node.attr.zone</code> set
to <code class="literal">zone1</code> and create an index with 5 shards and 1 replica, Elasticsearch creates
the index and allocates the 5 primary shards but no replicas. Replicas are
only allocated once nodes with <code class="literal">node.attr.zone</code> set to <code class="literal">zone2</code> are available.</p>
</div>

</div>

<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="cluster-shard-allocation-filtering"></a>Cluster-level shard allocation filtering<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/allocation_filtering.asciidoc">edit</a>
</h3>
</div></div></div>
<p>You can use cluster-level shard allocation filters to control where Elasticsearch
allocates shards from any index. These cluster wide filters are applied in
conjunction with <a class="xref" href="shard-allocation-filtering.html" title="Index-level shard allocation filtering">per-index allocation filtering</a>
and <a class="xref" href="modules-cluster.html#shard-allocation-awareness" title="Shard allocation awareness">allocation awareness</a>.</p>
<p>Shard allocation filters can be based on custom node attributes or the built-in
<code class="literal">_name</code>, <code class="literal">_host_ip</code>, <code class="literal">_publish_ip</code>, <code class="literal">_ip</code>, <code class="literal">_host</code> and <code class="literal">_id</code> attributes.</p>
<p>The <code class="literal">cluster.routing.allocation</code> settings are dynamic, enabling live indices to
be moved from one set of nodes to another. Shards are only relocated if it is
possible to do so without breaking another routing constraint, such as never
allocating a primary and replica shard on the same node.</p>
<p>The most common use case for cluster-level shard allocation filtering is when
you want to decommission a node. To move shards off of a node prior to shutting
it down, you could create a filter that excludes the node by its IP address:</p>
<div class="pre_wrapper lang-console">
<pre class="programlisting prettyprint lang-console">PUT _cluster/settings
{
  "transient" : {
    "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
  }
}</pre>
</div>
<div class="console_widget" data-snippet="snippets/21.console"></div>
<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="cluster-routing-settings"></a>Cluster routing settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/allocation_filtering.asciidoc">edit</a>
</h4>
</div></div></div>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.include.{attribute}</code>
</span>
</dt>
<dd>
Allocate shards to a node whose <code class="literal">{attribute}</code> has at least one of the
comma-separated values.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.require.{attribute}</code>
</span>
</dt>
<dd>
Only allocate shards to a node whose <code class="literal">{attribute}</code> has <em>all</em> of the
comma-separated values.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.routing.allocation.exclude.{attribute}</code>
</span>
</dt>
<dd>
Do not allocate shards to a node whose <code class="literal">{attribute}</code> has <em>any</em> of the
comma-separated values.
</dd>
</dl>
</div>
<p>The cluster allocation settings support the following built-in attributes:</p>
<div class="informaltable">
<table border="0" cellpadding="4px">
<colgroup>
<col>
<col>
</colgroup>
<tbody valign="top">
<tr>
<td valign="top">
<p>
<code class="literal">_name</code>
</p>
</td>
<td valign="top">
<p>
Match nodes by node name
</p>
</td>
</tr>
<tr>
<td valign="top">
<p>
<code class="literal">_host_ip</code>
</p>
</td>
<td valign="top">
<p>
Match nodes by host IP address (IP associated with hostname)
</p>
</td>
</tr>
<tr>
<td valign="top">
<p>
<code class="literal">_publish_ip</code>
</p>
</td>
<td valign="top">
<p>
Match nodes by publish IP address
</p>
</td>
</tr>
<tr>
<td valign="top">
<p>
<code class="literal">_ip</code>
</p>
</td>
<td valign="top">
<p>
Match either <code class="literal">_host_ip</code> or <code class="literal">_publish_ip</code>
</p>
</td>
</tr>
<tr>
<td valign="top">
<p>
<code class="literal">_host</code>
</p>
</td>
<td valign="top">
<p>
Match nodes by hostname
</p>
</td>
</tr>
<tr>
<td valign="top">
<p>
<code class="literal">_id</code>
</p>
</td>
<td valign="top">
<p>
Match nodes by node id
</p>
</td>
</tr>
</tbody>
</table>
</div>
<p>You can use wildcards when specifying attribute values, for example:</p>
<div class="pre_wrapper lang-console">
<pre class="programlisting prettyprint lang-console">PUT _cluster/settings
{
  "transient": {
    "cluster.routing.allocation.exclude._ip": "192.168.2.*"
  }
}</pre>
</div>
<div class="console_widget" data-snippet="snippets/22.console"></div>
</div>

</div>

<div class="section">
<div class="titlepage"><div><div>
<h3 class="title">
<a id="misc-cluster-settings"></a>Miscellaneous cluster settings<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h3>
</div></div></div>
<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="cluster-read-only"></a>Metadata<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h4>
</div></div></div>
<p>An entire cluster may be set to read-only with the following <em>dynamic</em> setting:</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.blocks.read_only</code>
</span>
</dt>
<dd>
Make the whole cluster read-only (indices do not accept write
operations) and prevent metadata changes such as creating or deleting
indices.
</dd>
<dt>
<span class="term">
<code class="literal">cluster.blocks.read_only_allow_delete</code>
</span>
</dt>
<dd>
Identical to <code class="literal">cluster.blocks.read_only</code> but allows indices to be
deleted to free up resources.
</dd>
</dl>
</div>
<div class="warning admon">
<div class="icon"></div>
<div class="admon_content">
<p>Don’t rely on this setting to prevent changes to your cluster. Any
user with access to the <a class="xref" href="cluster-update-settings.html" title="Cluster update settings API">cluster-update-settings</a>
API can make the cluster read-write again.</p>
</div>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="cluster-shard-limit"></a>Cluster shard limit<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h4>
</div></div></div>
<p>There is a soft limit on the number of shards in a cluster, based on the number
of nodes in the cluster. This is intended to prevent operations which may
unintentionally destabilize the cluster.</p>
<div class="important admon">
<div class="icon"></div>
<div class="admon_content">
<p>This limit is intended as a safety net, not a sizing recommendation. The
exact number of shards your cluster can safely support depends on your hardware
configuration and workload, but should remain well below this limit in almost
all cases, as the default limit is set quite high.</p>
</div>
</div>
<p>If an operation, such as creating a new index, restoring a snapshot of an index,
or opening a closed index would lead to the number of shards in the cluster
going over this limit, the operation will fail with an error indicating the
shard limit.</p>
<p>If the cluster is already over the limit, due to changes in node membership or
setting changes, all operations that create or open indices will fail until
either the limit is increased as described below, or some indices are
<a class="xref" href="indices-open-close.html" title="Open index API">closed</a> or <a class="xref" href="indices-delete-index.html" title="Delete index API">deleted</a> to bring the
number of shards below the limit.</p>
<p>Replicas count towards this limit, but closed indices do not. An index with 5
primary shards and 2 replicas will be counted as 15 shards. Any closed index
is counted as 0, no matter how many shards and replicas it contains.</p>
<p>The limit defaults to 1,000 shards per data node, and can be dynamically
adjusted using the following property:</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.max_shards_per_node</code>
</span>
</dt>
<dd>
Controls the number of shards allowed in the cluster per data node.
</dd>
</dl>
</div>
<p>For example, a 3-node cluster with the default setting would allow 3,000 shards
total, across all open indices. If the above setting is changed to 500, then
the cluster would allow 1,500 shards total.</p>
<div class="note admon">
<div class="icon"></div>
<div class="admon_content">
<p>If there are no data nodes in the cluster, the limit will not be enforced.
This allows the creation of indices during cluster creation if dedicated master
nodes are set up before data nodes.</p>
</div>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="user-defined-data"></a>User-defined cluster metadata<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h4>
</div></div></div>
<p>User-defined metadata can be stored and retrieved using the Cluster Settings API.
This can be used to store arbitrary, infrequently-changing data about the cluster
without the need to create an index to store it. This data may be stored using
any key prefixed with <code class="literal">cluster.metadata.</code>.  For example, to store the email
address of the administrator of a cluster under the key <code class="literal">cluster.metadata.administrator</code>,
issue this request:</p>
<div class="pre_wrapper lang-console">
<pre class="programlisting prettyprint lang-console">PUT /_cluster/settings
{
  "persistent": {
    "cluster.metadata.administrator": "sysadmin@example.com"
  }
}</pre>
</div>
<div class="console_widget" data-snippet="snippets/23.console"></div>
<div class="important admon">
<div class="icon"></div>
<div class="admon_content">
<p>User-defined cluster metadata is not intended to store sensitive or
confidential information. Any information stored in user-defined cluster
metadata will be viewable by anyone with access to the
<a class="xref" href="cluster-get-settings.html" title="Cluster get settings API">Cluster Get Settings</a> API, and is recorded in the
Elasticsearch logs.</p>
</div>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="cluster-max-tombstones"></a>Index tombstones<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h4>
</div></div></div>
<p>The cluster state maintains index tombstones to explicitly denote indices that
have been deleted.  The number of tombstones maintained in the cluster state is
controlled by the following property, which cannot be updated dynamically:</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.indices.tombstones.size</code>
</span>
</dt>
<dd>
Index tombstones prevent nodes that are not part of the cluster when a delete
occurs from joining the cluster and reimporting the index as though the delete
was never issued. To keep the cluster state from growing huge we only keep the
last <code class="literal">cluster.indices.tombstones.size</code> deletes, which defaults to 500. You can
increase it if you expect nodes to be absent from the cluster and miss more
than 500 deletes. We think that is rare, thus the default. Tombstones don’t take
up much space, but we also think that a number like 50,000 is probably too big.
</dd>
</dl>
</div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="cluster-logger"></a>Logger<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h4>
</div></div></div>
<p>The settings which control logging can be updated dynamically with the
<code class="literal">logger.</code> prefix.  For instance, to increase the logging level of the
<code class="literal">indices.recovery</code> module to <code class="literal">DEBUG</code>, issue this request:</p>
<div class="pre_wrapper lang-console">
<pre class="programlisting prettyprint lang-console">PUT /_cluster/settings
{
  "transient": {
    "logger.org.elasticsearch.indices.recovery": "DEBUG"
  }
}</pre>
</div>
<div class="console_widget" data-snippet="snippets/24.console"></div>
</div>

<div class="section">
<div class="titlepage"><div><div>
<h4 class="title">
<a id="persistent-tasks-allocation"></a>Persistent tasks allocation<a class="edit_me edit_me_private" rel="nofollow" title="Editing on GitHub is available to Elastic" href="https://github.com/elastic/elasticsearch/edit/7.7/docs/reference/modules/cluster/misc.asciidoc">edit</a>
</h4>
</div></div></div>
<p>Plugins can create a kind of task called a persistent task. Those tasks are
usually long-lived tasks and are stored in the cluster state, allowing the
tasks to be revived after a full cluster restart.</p>
<p>Every time a persistent task is created, the master node takes care of
assigning the task to a node of the cluster, and the assigned node will then
pick up the task and execute it locally. The process of assigning persistent
tasks to nodes is controlled by the following properties, which can be updated
dynamically:</p>
<div class="variablelist">
<dl class="variablelist">
<dt>
<span class="term">
<code class="literal">cluster.persistent_tasks.allocation.enable</code>
</span>
</dt>
<dd>
<p>Enable or disable allocation for persistent tasks:</p>
<div class="ulist itemizedlist">
<ul class="itemizedlist">
<li class="listitem">
<code class="literal">all</code> -             (default) Allows persistent tasks to be assigned to nodes
</li>
<li class="listitem">
<code class="literal">none</code> -            No allocations are allowed for any type of persistent task
</li>
</ul>
</div>
<p>This setting does not affect the persistent tasks that are already being executed.
Only newly created persistent tasks, or tasks that must be reassigned (after a node
left the cluster, for example), are impacted by this setting.</p>
</dd>
<dt>
<span class="term">
<code class="literal">cluster.persistent_tasks.allocation.recheck_interval</code>
</span>
</dt>
<dd>
The master node will automatically check whether persistent tasks need to
be assigned when the cluster state changes significantly. However, there
may be other factors, such as memory usage, that affect whether persistent
tasks can be assigned to nodes but do not cause the cluster state to change.
This setting controls how often assignment checks are performed to react to
these factors. The default is 30 seconds. The minimum permitted value is 10
seconds.
</dd>
</dl>
</div>
</div>

</div>

</div>
<div class="navfooter">
<span class="prev">
<a href="circuit-breaker.html">« Circuit breaker settings</a>
</span>
<span class="next">
<a href="ccr-settings.html">Cross-cluster replication settings »</a>
</span>
</div>
</div>

                  <!-- end body -->
                </div>
                <div class="col-xs-12 col-sm-4 col-md-4" id="right_col">
                  <div id="rtpcontainer" style="display: block;">
                    <div class="mktg-promo">
                      <h3>Most Popular</h3>
                      <ul class="icons">
                        <li class="icon-elasticsearch-white"><a href="https://www.elastic.co/webinars/getting-started-elasticsearch?baymax=default&amp;elektra=docs&amp;storm=top-video">Get Started with Elasticsearch: Video</a></li>
                        <li class="icon-kibana-white"><a href="https://www.elastic.co/webinars/getting-started-kibana?baymax=default&amp;elektra=docs&amp;storm=top-video">Intro to Kibana: Video</a></li>
                        <li class="icon-logstash-white"><a href="https://www.elastic.co/webinars/introduction-elk-stack?baymax=default&amp;elektra=docs&amp;storm=top-video">ELK for Logs &amp; Metrics: Video</a></li>
                      </ul>
                    </div>
                  </div>
                </div>
              </div>
            </div>
          </section>

        </div>


<div id="elastic-footer"></div>
<script src="https://www.elastic.co/elastic-footer.js"></script>
<!-- Footer Section end-->

      </section>
    </div>

<script src="/guide/static/jquery.js"></script>
<script type="text/javascript" src="/guide/static/docs.js"></script>
<script type="text/javascript">
  window.initial_state = {}</script>
  </body>
</html>
