"""
@author: 江同学呀
@file: index_setting.py
@date: 2024/2/9 1:23
@desc:
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/index-modules.html#dynamic-index-settings
"""
from typing import Dict, Union, List

from espc.common.index_common import QueryType, ShardAllocation, ShardRebalancing
from espc.orm.configure.index_lifecycle_management import ILMSetting
from espc.orm.model.base.base import _Base

CUSTOM: str = "custom"


class StaticIndexSetting(_Base):
    """
    Static index settings — settings that can only be set at index creation
    time (or on a closed index).

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/index-modules.html

    :param number_of_shards:
        The number of primary shards that an index should have. Defaults to 1.
        Can only be set at index creation time; cannot be changed on a closed
        index.
    :param number_of_routing_shards:
        Integer value used with index.number_of_shards to route documents to a
        primary shard. The default depends on the number of primary shards and
        is designed to allow splitting by factors of 2 up to a maximum of 1024
        shards.
    :param codec:
        The default compresses stored data with LZ4; may be set to
        ``best_compression``, which uses DEFLATE for a higher compression
        ratio at the expense of slower stored-fields performance. A new codec
        is applied after segments are merged (force merge can be used).
    :param routing_partition_size:
        The number of shards a custom routing value can go to. Defaults to 1
        and can only be set at index creation time. Must be less than
        index.number_of_shards unless that value is also 1.
    :param soft_deletes_enabled:
        Whether soft deletes are enabled on the index. Defaults to true.
        Deprecated in 7.6.0: creating indices with soft deletes disabled will
        be removed in future Elasticsearch versions.
    :param soft_deletes_retention_lease_period:
        The maximum period to retain a shard history retention lease before it
        is considered expired. Retention leases ensure soft deletes survive
        Lucene merges so they can be replicated to followers. Defaults to 12h.
    :param load_fixed_bitset_filters_eagerly:
        Whether cached filters are pre-loaded for nested queries. Possible
        values are true (default) and false.
    :param shard_check_on_startup:
        Whether Elasticsearch performs additional integrity checks while
        opening a shard. If these checks detect corruption they prevent the
        shard from being opened. Accepts ``false`` (default), ``checksum`` or
        ``true``.
    """
    def __init__(
            self,
            number_of_shards: Union[str, int] = None,
            number_of_routing_shards: Union[str, int] = None,
            codec: str = None,
            routing_partition_size: Union[str, int] = None,
            soft_deletes_enabled: bool = None,
            soft_deletes_retention_lease_period: str = None,
            load_fixed_bitset_filters_eagerly: bool = None,
            shard_check_on_startup: Union[str, bool] = None,
            **kwargs
    ):
        super().__init__(**kwargs)
        self._number_of_shards: Union[str, int] = number_of_shards
        self._number_of_routing_shards: Union[str, int] = number_of_routing_shards
        self._codec: str = codec
        self._routing_partition_size: Union[str, int] = routing_partition_size
        self._soft_deletes_enabled: bool = soft_deletes_enabled
        self._soft_deletes_retention_lease_period: str = soft_deletes_retention_lease_period
        self._load_fixed_bitset_filters_eagerly: bool = load_fixed_bitset_filters_eagerly
        self._shard_check_on_startup: Union[str, bool] = shard_check_on_startup
        return

    def _build(self) -> Dict:
        """
        Build the settings structure.
        :return: dict of the static settings that were explicitly provided
        """
        # Cooperative build: merge whatever the next class in the MRO produced;
        # _Base (end of the chain) signals "nothing" via NotImplementedError.
        try:
            body: Dict = {} | super()._build()
        except NotImplementedError:
            body: Dict = {}
        if self._number_of_shards:
            body["number_of_shards"] = self._number_of_shards
        if self._number_of_routing_shards:
            body["number_of_routing_shards"] = self._number_of_routing_shards
        if self._codec:
            body["codec"] = self._codec
        if self._routing_partition_size:
            body["routing_partition_size"] = self._routing_partition_size
        # Boolean settings use `is not None` so an explicit False is still
        # emitted — False is a valid value distinct from "not set".
        if self._soft_deletes_enabled is not None:
            if "soft_deletes" not in body:
                body["soft_deletes"] = {}
            body["soft_deletes"]["enabled"] = self._soft_deletes_enabled
        if self._soft_deletes_retention_lease_period is not None:
            if "soft_deletes" not in body:
                body["soft_deletes"] = {"retention_lease": {}}
            if "retention_lease" not in body["soft_deletes"]:
                body["soft_deletes"]["retention_lease"] = {}
            body["soft_deletes"]["retention_lease"]["period"] = self._soft_deletes_retention_lease_period
        # Fixed: was a truthiness test, which silently dropped an explicit
        # False (the setting defaults to true, so False is meaningful).
        if self._load_fixed_bitset_filters_eagerly is not None:
            body["load_fixed_bitset_filters_eagerly"] = self._load_fixed_bitset_filters_eagerly
        # Fixed: was a truthiness test; shard_check_on_startup may be the
        # boolean False as well as the strings "false"/"checksum"/"true".
        if self._shard_check_on_startup is not None:
            if "shard" not in body:
                body["shard"] = {}
            body["shard"]["check_on_startup"] = self._shard_check_on_startup
        return body


class DynamicIndexSetting(_Base):
    """
    Dynamic index settings — settings that can be changed on a live index.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/index-modules.html#dynamic-index-settings

    :param number_of_replicas:
        The number of replicas each primary shard has. Defaults to 1.
    :param auto_expand_replicas:
        Auto-expand the number of replicas based on the number of data nodes
        in the cluster. Set to a dash-delimited lower and upper bound
        (e.g. 0-5) or use ``all`` for the upper bound (e.g. 0-all). Defaults
        to false (i.e. disabled). The auto-expanded replica count only takes
        allocation filtering rules into account, which can lead to YELLOW
        cluster health if other rules prevent allocating all replicas.
    :param search_idle_after:
        How long a shard can go without receiving a search or get request
        before it is considered search idle. Defaults to 30s.
    :param refresh_interval:
        How often to perform a refresh operation, which makes recent changes
        visible to search. Defaults to 1s; can be set to -1 to disable
        refresh. If not explicitly set, search-idle shards skip background
        refreshes until they receive a search request.
    :param max_result_window:
        The maximum value of from + size for searches to this index.
        Defaults to 10000. See Scroll or Search After for a more efficient
        alternative to raising this.
    :param max_inner_result_window:
        The maximum value of from + size for inner hits definitions and top
        hits aggregations. Defaults to 100.
    :param max_rescore_window:
        The maximum value of window_size for rescore requests. Defaults to
        index.max_result_window (which defaults to 10000).
    :param max_docvalue_fields_search:
        The maximum number of docvalue_fields allowed in a query. Defaults
        to 100.
    :param max_script_fields:
        The maximum number of script_fields allowed in a query. Defaults to 32.
    :param max_ngram_diff:
        The maximum allowed difference between min_gram and max_gram for
        NGramTokenizer and NGramTokenFilter. Defaults to 1.
    :param max_shingle_diff:
        The maximum allowed difference between max_shingle_size and
        min_shingle_size for the shingle token filter. Defaults to 3.
    :param max_refresh_listeners:
        Maximum number of refresh listeners available on each shard of the
        index. These listeners are used to implement refresh=wait_for.
    :param analyze_max_token_count:
        The maximum number of tokens that can be produced using the _analyze
        API. Defaults to 10000.
    :param highlight_max_analyzed_offset:
        The maximum number of characters analyzed for a highlight request.
        Only applies when highlighting text indexed without offsets or term
        vectors. Defaults to 1000000.
    :param max_terms_count:
        The maximum number of terms usable in a Terms Query. Defaults to 65536.
    :param max_regex_length:
        The maximum length of regex usable in a Regexp Query. Defaults to 1000.
    :param query_default_field:
        (string or array of strings) Wildcard (*) patterns matching one or
        more fields that several query types search by default. Defaults to
        *, which matches all fields eligible for term-level queries,
        excluding metadata fields.
    :param routing_allocation_enable:
        Controls shard allocation for this index:
            all (default) - allows shard allocation for all shards.
            primaries - allows shard allocation only for primary shards.
            new_primaries - allows shard allocation only for newly-created
                primary shards.
            none - no shard allocation is allowed.
    :param routing_rebalance_enable:
        Enables shard rebalancing for this index:
            all (default) - allows shard rebalancing for all shards.
            primaries - allows shard rebalancing only for primary shards.
            replicas - allows shard rebalancing only for replica shards.
            none - no shard rebalancing is allowed.
    :param gc_deletes:
        The length of time a deleted document's version number remains
        available for further versioned operations. Defaults to 60s.
    :param default_pipeline:
        Default ingest pipeline for the index. Index requests fail if it is
        set but the pipeline does not exist; may be overridden with the
        pipeline parameter. The special name ``_none`` means no pipeline runs.
    :param final_pipeline:
        Final ingest pipeline for the index; always runs after the request
        pipeline (if specified) and the default pipeline (if it exists). The
        special name ``_none`` means no pipeline runs.
    :param hidden:
        Whether the index should be hidden by default from wildcard
        expressions (controllable per request via expand_wildcards).
        Possible values are true and false (default).
    """
    def __init__(
            self,
            number_of_replicas: Union[str, int] = None,
            auto_expand_replicas: Union[str, bool] = None,
            search_idle_after: str = None,
            refresh_interval: Union[str, int] = None,
            max_result_window: int = None,
            max_inner_result_window: int = None,
            max_rescore_window: int = None,
            max_docvalue_fields_search: int = None,
            max_script_fields: int = None,
            max_ngram_diff: int = None,
            max_shingle_diff: int = None,
            max_refresh_listeners: int = None,
            analyze_max_token_count: int = None,
            highlight_max_analyzed_offset: int = None,
            max_terms_count: int = None,
            max_regex_length: int = None,
            query_default_field: Union[str, List[str], QueryType, List[QueryType]] = None,
            routing_allocation_enable: Union[str, ShardAllocation] = None,
            routing_rebalance_enable: Union[str, ShardRebalancing] = None,
            gc_deletes: str = None,
            default_pipeline: str = None,
            final_pipeline: str = None,
            hidden: bool = None,
            **kwargs
    ):
        super().__init__(**kwargs)
        self._number_of_replicas: Union[str, int] = number_of_replicas
        self._auto_expand_replicas: Union[str, bool] = auto_expand_replicas
        self._search_idle_after: str = search_idle_after
        self._refresh_interval: Union[str, int] = refresh_interval
        self._max_result_window: int = max_result_window
        self._max_inner_result_window: int = max_inner_result_window
        self._max_rescore_window: int = max_rescore_window
        self._max_docvalue_fields_search: int = max_docvalue_fields_search
        self._max_script_fields: int = max_script_fields
        self._max_ngram_diff: int = max_ngram_diff
        self._max_shingle_diff: int = max_shingle_diff
        self._max_refresh_listeners: int = max_refresh_listeners
        self._analyze_max_token_count: int = analyze_max_token_count
        self._highlight_max_analyzed_offset: int = highlight_max_analyzed_offset
        self._max_terms_count: int = max_terms_count
        self._max_regex_length: int = max_regex_length
        self._query_default_field: Union[str, List[str], QueryType, List[QueryType]] = query_default_field
        self._routing_allocation_enable: Union[str, ShardAllocation] = routing_allocation_enable
        self._routing_rebalance_enable: Union[str, ShardRebalancing] = routing_rebalance_enable
        self._gc_deletes: str = gc_deletes
        self._default_pipeline: str = default_pipeline
        self._final_pipeline: str = final_pipeline
        self._hidden: bool = hidden
        return

    def _build(self) -> Dict:
        """
        Build the settings structure.
        :return: dict of the dynamic settings that were explicitly provided
        """
        # Cooperative build: merge whatever the next class in the MRO produced;
        # _Base (end of the chain) signals "nothing" via NotImplementedError.
        try:
            body: Dict = {} | super()._build()
        except NotImplementedError:
            body: Dict = {}
        # Fixed: was a truthiness test — number_of_replicas=0 is a valid
        # Elasticsearch value and was silently dropped.
        if self._number_of_replicas is not None:
            body["number_of_replicas"] = self._number_of_replicas
        # Fixed: was a truthiness test — an explicit boolean False (disable
        # auto-expansion) was silently dropped.
        if self._auto_expand_replicas is not None:
            body["auto_expand_replicas"] = self._auto_expand_replicas
        if self._search_idle_after:
            if "search" not in body:
                body["search"] = {"idle": {}}
            if "idle" not in body["search"]:
                body["search"]["idle"] = {}
            body["search"]["idle"]["after"] = self._search_idle_after
        if self._refresh_interval:
            body["refresh_interval"] = self._refresh_interval
        if self._max_result_window:
            body["max_result_window"] = self._max_result_window
        if self._max_inner_result_window:
            body["max_inner_result_window"] = self._max_inner_result_window
        if self._max_rescore_window:
            body["max_rescore_window"] = self._max_rescore_window
        if self._max_docvalue_fields_search:
            body["max_docvalue_fields_search"] = self._max_docvalue_fields_search
        if self._max_script_fields:
            body["max_script_fields"] = self._max_script_fields
        if self._max_ngram_diff:
            body["max_ngram_diff"] = self._max_ngram_diff
        if self._max_shingle_diff:
            body["max_shingle_diff"] = self._max_shingle_diff
        if self._max_refresh_listeners:
            body["max_refresh_listeners"] = self._max_refresh_listeners
        if self._analyze_max_token_count:
            if "analyze" not in body:
                body["analyze"] = {}
            body["analyze"]["max_token_count"] = self._analyze_max_token_count
        if self._highlight_max_analyzed_offset:
            if "highlight" not in body:
                body["highlight"] = {}
            body["highlight"]["max_analyzed_offset"] = self._highlight_max_analyzed_offset
        if self._max_terms_count:
            body["max_terms_count"] = self._max_terms_count
        if self._max_regex_length:
            body["max_regex_length"] = self._max_regex_length
        if self._query_default_field:
            if "query" not in body:
                body["query"] = {}
            # Accept a single value (str or QueryType) or a list of either;
            # enums are unwrapped to their string values.
            if isinstance(self._query_default_field, str):
                body["query"]["default_field"] = self._query_default_field
            elif isinstance(self._query_default_field, QueryType):
                body["query"]["default_field"] = self._query_default_field.value
            else:
                body["query"]["default_field"] = [
                    _qdf.value if isinstance(_qdf, QueryType) else _qdf
                    for _qdf in self._query_default_field
                ]
        if self._routing_allocation_enable:
            if "routing" not in body:
                body["routing"] = {"allocation": {}}
            if "allocation" not in body["routing"]:
                body["routing"]["allocation"] = {}
            body["routing"]["allocation"]["enable"] = (
                self._routing_allocation_enable
                if isinstance(self._routing_allocation_enable, str)
                else self._routing_allocation_enable.value
            )
        if self._routing_rebalance_enable:
            if "routing" not in body:
                body["routing"] = {"rebalance": {}}
            if "rebalance" not in body["routing"]:
                body["routing"]["rebalance"] = {}
            body["routing"]["rebalance"]["enable"] = (
                self._routing_rebalance_enable
                if isinstance(self._routing_rebalance_enable, str)
                else self._routing_rebalance_enable.value
            )
        if self._gc_deletes:
            body["gc_deletes"] = self._gc_deletes
        if self._default_pipeline:
            body["default_pipeline"] = self._default_pipeline
        if self._final_pipeline:
            body["final_pipeline"] = self._final_pipeline
        # Fixed: was a truthiness test — an explicit hidden=False was dropped.
        if self._hidden is not None:
            body["hidden"] = self._hidden
        return body


class OtherIndexSetting(_Base):
    """
    Other index settings not covered by the static/dynamic groups
    (currently only the ``analysis`` block).

    :param analysis: raw ``analysis`` settings dict passed through as-is
    """

    def __init__(self, analysis: Dict = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._analysis: Dict = analysis

    def _build(self) -> Dict:
        """
        Build the settings structure.
        :return: dict containing ``analysis`` when it was provided
        """
        # Merge the result of the next _build in the MRO; _Base at the end of
        # the chain signals "nothing" by raising NotImplementedError.
        try:
            body: Dict = dict(super()._build())
        except NotImplementedError:
            body = {}
        if self._analysis:
            body["analysis"] = self._analysis
        return body


class LifecycleManagementIndexSetting(_Base, ILMSetting):
    """
    Index lifecycle management (ILM) settings.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/ilm-settings.html

    :param lifecycle_indexing_complete:
        (Dynamic, Boolean) Indicates whether the index has been rolled over.
        Automatically set to true when ILM completes the rollover action; may
        be set explicitly to skip rollover. Defaults to false.
    :param lifecycle_name:
        (Dynamic, string) The name of the policy used to manage the index.
    :param lifecycle_origination_date:
        (Dynamic, long) If specified, the timestamp used to calculate the
        index age for phase transitions. Useful when a new index contains old
        data and the original creation date should drive the age. Specified
        as a Unix epoch value in milliseconds.
    :param lifecycle_parse_origination_date:
        (Dynamic, Boolean) Set to true to parse the origination date from the
        index name, which must match ``^.*-{date_format}-\\d+`` where
        date_format is yyyy.MM.dd and the trailing digits are optional
        (e.g. logs-2016.10.31-000002). Index creation fails on a mismatch.
    :param lifecycle_step_wait_time_threshold:
        (Dynamic, time value) Time to wait for the cluster to resolve
        allocation issues during an ILM shrink action. Must be greater than
        1h; defaults to 12h.
    :param lifecycle_rollover_alias:
        (Dynamic, string) The index alias to update when the index rolls
        over. Specify when using a policy that contains a rollover action.
    """
    def __init__(
            self,
            lifecycle_indexing_complete: bool = None,
            lifecycle_name: str = None,
            lifecycle_origination_date: int = None,
            lifecycle_parse_origination_date: bool = None,
            lifecycle_step_wait_time_threshold: str = None,
            lifecycle_rollover_alias: str = None,
            **kwargs
    ):
        super().__init__(**kwargs)
        self._lifecycle_indexing_complete: bool = lifecycle_indexing_complete
        self._lifecycle_name: str = lifecycle_name
        self._lifecycle_origination_date: int = lifecycle_origination_date
        self._lifecycle_parse_origination_date: bool = lifecycle_parse_origination_date
        self._lifecycle_step_wait_time_threshold: str = lifecycle_step_wait_time_threshold
        self._lifecycle_rollover_alias: str = lifecycle_rollover_alias
        return

    def _build(self) -> Dict:
        """
        Build the settings structure.
        :return: ``{"lifecycle": {...}}`` with the explicitly provided
            options, or an empty dict when none were set (consistent with the
            sibling setting classes, which also return ``{}`` when empty)
        """
        lifecycle: Dict = {}
        # Boolean settings use `is not None` so an explicit False is still
        # emitted — previously a truthiness test silently dropped it.
        if self._lifecycle_indexing_complete is not None:
            lifecycle["indexing_complete"] = self._lifecycle_indexing_complete
        if self._lifecycle_name:
            lifecycle["name"] = self._lifecycle_name
        if self._lifecycle_origination_date:
            lifecycle["origination_date"] = self._lifecycle_origination_date
        if self._lifecycle_parse_origination_date is not None:
            lifecycle["parse_origination_date"] = self._lifecycle_parse_origination_date
        if self._lifecycle_step_wait_time_threshold:
            lifecycle["step"] = {"wait_time_threshold": self._lifecycle_step_wait_time_threshold}
        if self._lifecycle_rollover_alias:
            lifecycle["rollover_alias"] = self._lifecycle_rollover_alias
        # Fixed: previously an empty {"lifecycle": {}} was emitted even when
        # no option was set.
        return {"lifecycle": lifecycle} if lifecycle else {}


class IndexSetting(StaticIndexSetting, DynamicIndexSetting, OtherIndexSetting):
    """
    Combined index settings.

    Aggregates the static, dynamic and other setting groups; building simply
    delegates down the cooperative ``_build`` chain of the parent classes, so
    the result is the merged dict of all three groups.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _build(self) -> Dict:
        """
        Build the merged settings structure.
        :return: union of all parent ``_build`` results
        """
        return super()._build()









