"""
@author: 江同学呀
@file: field_attribute.py
@date: 2024/7/25 14:51
@desc: 
"""
from typing import Dict, List, Callable, Tuple, Any, Union, Optional, Iterable

from espc.common.field_common import IndexOptions, Similarity, TimeSeriesMetric, Dynamic, TimeFormat, Metrics, \
    TermVector, PrefixTree, Precision, Strategy, Orientation
from espc.err import ESPCInvalidValueRangeError, ESPCInvalidTypeError
from espc.orm.model.struct.fielddata_frequency_filter import FieldDataFrequencyFilter
from espc.orm.model.struct.index_prefixes import IndexPrefixes
from espc.orm.model.mapping.field.base_field.base_field import _BaseField
from espc.orm.model.mapping.field.base_field.base_string_field import _BaseStringField
from espc.orm.model.properties import Properties


class _Attribute:
    def __init__(self, *args, **kwargs):
        self._attr_method_list: List[Callable] = []
        return


class DocValuesAttribute(_Attribute):
    """
    Doc-values attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/doc-values.html

    Should the field be stored on disk in a column-stride fashion, so that it
    can later be used for sorting, aggregations, or scripting? Accepts true
    (default) or false.

    :param doc_values: whether to store the field as doc values
    """

    def __init__(self, doc_values: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._doc_values: bool = doc_values
        # Only register the builder when the user supplied an explicit value.
        if doc_values is not None:
            self._attr_method_list.append(self._build_doc_value)

    def _build_doc_value(self) -> Tuple[str, bool]:
        # Mapping entry: "doc_values": <bool>.
        return "doc_values", self._doc_values


class StoreAttribute(_Attribute):
    """
    Store attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-store.html

    Whether the field value should be stored and retrievable separately from
    the _source field. Accepts true or false (default).

    :param store: whether to store the field value separately
    """

    def __init__(self, store: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._store: bool = store
        # Only register the builder when the user supplied an explicit value.
        if store is not None:
            self._attr_method_list.append(self._build_store)

    def _build_store(self) -> Tuple[str, bool]:
        # Mapping entry: "store": <bool>.
        return "store", self._store


class BoostAttribute(_Attribute):
    """
    Boost attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-boost.html

    Mapping field-level query time boosting. Accepts a floating point number,
    defaults to 1.0.

    :param boost: query-time boost factor
    """

    def __init__(self, boost: float = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._boost: float = boost
        # Only register the builder when the user supplied an explicit value.
        if boost is not None:
            self._attr_method_list.append(self._build_boost)

    def _build_boost(self) -> Tuple[str, float]:
        # Mapping entry: "boost": <float>.
        return "boost", self._boost


class IndexAttribute(_Attribute):
    """
    Index attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-index.html

    Determines if the field should be searchable. Accepts true (default) or
    false.

    :param index: whether the field is searchable
    """

    def __init__(self, index: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._index: bool = index
        # Only register the builder when the user supplied an explicit value.
        if index is not None:
            self._attr_method_list.append(self._build_index)

    def _build_index(self) -> Tuple[str, bool]:
        # Mapping entry: "index": <bool>.
        return "index", self._index


class NullValueAttribute(_Attribute):
    """
    Null-value attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/null-value.html

    A value which is substituted for any explicit null values within the field.
    Defaults to null, which means null fields are treated as if they were
    missing.

    :param null_value: substitute value for explicit nulls (any type)
    """

    def __init__(self, null_value: Any = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # BUG FIX (annotation only): the original annotated this attribute as
        # `float`, but the parameter accepts any value, matching the `Any`
        # parameter annotation and the builder's return type.
        self._null_value: Any = null_value
        # Only register the builder when the user supplied an explicit value.
        if null_value is not None:
            self._attr_method_list.append(self._build_null_value)

    def _build_null_value(self) -> Tuple[str, Any]:
        # Mapping entry: "null_value": <value>.
        return "null_value", self._null_value


class MetaAttribute(_Attribute):
    """
    Metadata attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-field-meta.html

    Metadata about the field.

    :param meta: metadata dictionary for the field
    """

    def __init__(self, meta: Dict = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._meta: Dict = meta
        # Only register the builder when the user supplied a metadata dict.
        if meta is not None:
            self._attr_method_list.append(self._build_meta)

    def _build_meta(self) -> Tuple[str, Dict]:
        # Mapping entry: "meta": <dict>.
        return "meta", self._meta


class EagerGlobalOrdinalsAttribute(_Attribute):
    """
    Eager-global-ordinals attribute.

    Should global ordinals be loaded eagerly on refresh? Accepts true or false
    (default). Enabling this is a good idea on fields that are frequently used
    for terms aggregations.

    :param eager_global_ordinals: whether to load global ordinals eagerly
    """

    def __init__(self, eager_global_ordinals: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._eager_global_ordinals: bool = eager_global_ordinals
        # Only register the builder when the user supplied an explicit value.
        if eager_global_ordinals is not None:
            self._attr_method_list.append(self._build_eager_global_ordinals)

    def _build_eager_global_ordinals(self) -> Tuple[str, bool]:
        # Mapping entry: "eager_global_ordinals": <bool>.
        return "eager_global_ordinals", self._eager_global_ordinals


class FieldsAttribute(_Attribute):
    """
    Multi-fields attribute.

    Allows a field to be indexed in several ways via sub-fields.

    :param fields: a single sub-field or a list of sub-fields
    """

    def __init__(self, fields: Union[_BaseStringField, List[_BaseStringField]] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._fields: List[_BaseStringField] = []
        # Normalize a single sub-field to a one-element list so both input
        # shapes are handled by the same loop.
        if isinstance(fields, _BaseStringField):
            fields = [fields]
        if isinstance(fields, list):
            for field in fields:
                # Expose each sub-field as an attribute so it can be reached
                # with dotted access from the parent field.
                setattr(self, field._field_name, field)
                field._set_parent_field(self)
            self._fields = list(fields)
        # BUG FIX: the original tested `self._fields is not None`, which is
        # always true because the list defaults to []. With no sub-fields the
        # builder was still registered and emitted "fields": None into the
        # mapping. Register the builder only when sub-fields actually exist.
        if self._fields:
            self._attr_method_list.append(self._build_fields)

    def _build_fields(self) -> Tuple[str, Dict[str, Dict]]:
        # Only registered when self._fields is non-empty, so the dict is never
        # built from an empty list.
        return "fields", {field.field_name: field._build() for field in self._fields}


class IgnoreAboveAttribute(_Attribute):
    """
    Ignore-above attribute.

    Leaf values longer than this limit will not be indexed. By default, there
    is no limit and all values will be indexed. Note that this limit applies to
    the leaf values within the flattened object field, and not the length of
    the entire field.

    :param ignore_above: maximum indexed value length
    """

    def __init__(self, ignore_above: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ignore_above: int = ignore_above
        # Only register the builder when the user supplied an explicit limit.
        if ignore_above is not None:
            self._attr_method_list.append(self._build_ignore_above)

    def _build_ignore_above(self) -> Tuple[str, int]:
        # Mapping entry: "ignore_above": <int>.
        return "ignore_above", self._ignore_above


class IndexOptionsAttribute(_Attribute):
    """
    Index-options attribute.

    What information should be stored in the index for scoring purposes.
    Defaults to docs, but can also be set to freqs to take term frequency into
    account when computing scores.

    :param index_options: option name as a string or an ``IndexOptions`` member
    """

    def __init__(self, index_options: Union[str, IndexOptions] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._index_options: Union[str, IndexOptions] = index_options
        # Only register the builder when the user supplied an explicit value.
        if index_options is not None:
            self._attr_method_list.append(self._build_index_options)

    def _build_index_options(self) -> Tuple[str, str]:
        # Accept either a raw string or an enum member; normalize to a string.
        value = self._index_options
        if not isinstance(value, str):
            value = value.value
        return "index_options", value


class NormsAttribute(_Attribute):
    """
    Norms attribute.

    :param norms: whether to store field-length norms for scoring
    """

    def __init__(self, norms: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._norms: bool = norms
        # Only register the builder when the user supplied an explicit value.
        if norms is not None:
            self._attr_method_list.append(self._build_norms)

    def _build_norms(self) -> Tuple[str, bool]:
        # Mapping entry: "norms": <bool>.
        return "norms", self._norms


class OnScriptErrorAttribute(_Attribute):
    """
    On-script-error attribute.

    Defines what to do if the script defined by the script parameter throws an
    error at indexing time. Accepts fail (default), which will cause the entire
    document to be rejected, and continue, which will register the field in the
    document's _ignored metadata field and continue indexing. This parameter
    can only be set if the script field is also set.

    :param on_script_error: "fail" or "continue"
    """

    def __init__(self, on_script_error: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._on_script_error: str = on_script_error
        # Only register the builder when the user supplied an explicit value.
        if on_script_error is not None:
            self._attr_method_list.append(self._build_on_script_error)

    def _build_on_script_error(self) -> Tuple[str, str]:
        # Mapping entry: "on_script_error": <str>.
        return "on_script_error", self._on_script_error


class ScriptAttribute(_Attribute):
    """
    Script attribute.

    If this parameter is set, then the field will index values generated by
    this script, rather than reading the values directly from the source. If a
    value is set for this field on the input document, then the document will
    be rejected with an error. Scripts are in the same format as their runtime
    equivalent, and should emit long-valued timestamps.

    :param script: script source used to generate indexed values
    """

    def __init__(self, script: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._script: str = script
        # Only register the builder when the user supplied a script.
        if script is not None:
            self._attr_method_list.append(self._build_script)

    def _build_script(self) -> Tuple[str, str]:
        # Mapping entry: "script": <str>.
        return "script", self._script


class TimeSeriesDimensionAttribute(_Attribute):
    """
    Time-series-dimension attribute.

    For internal use by Elastic only.
    Marks the field as a time series dimension. Defaults to false.
    The index.mapping.dimension_fields.limit index setting limits the number of
    dimensions in an index.
    Dimension fields have the following constraints:

    - The doc_values and index mapping parameters must be true.
    - Field values cannot be an array or multi-value.

    :param time_series_dimension: whether the field is a time series dimension
    """

    def __init__(self, time_series_dimension: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._time_series_dimension: bool = time_series_dimension
        # Only register the builder when the user supplied an explicit value.
        if time_series_dimension is not None:
            self._attr_method_list.append(self._build_time_series_dimension)

    def _build_time_series_dimension(self) -> Tuple[str, bool]:
        # Mapping entry: "time_series_dimension": <bool>.
        return "time_series_dimension", self._time_series_dimension


class SimilarityAttribute(_Attribute):
    """
    Similarity attribute.

    Which scoring algorithm or similarity should be used. Defaults to BM25.

    :param similarity: similarity name as a string or a ``Similarity`` member
    """

    def __init__(self, similarity: Union[str, Similarity] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._similarity: Union[str, Similarity] = similarity
        # Only register the builder when the user supplied an explicit value.
        if similarity is not None:
            self._attr_method_list.append(self._build_similarity)

    def _build_similarity(self) -> Tuple[str, str]:
        # Accept either a raw string or an enum member; normalize to a string.
        value = self._similarity
        if not isinstance(value, str):
            value = value.value
        return "similarity", value


class NormalizerAttribute(_Attribute):
    """
    Normalizer attribute.

    :param normalizer: normalizer name

    TODO: revisit once a dedicated normalizer class is implemented.
    """

    def __init__(self, normalizer: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._normalizer: str = normalizer
        # Only register the builder when the user supplied an explicit value.
        if normalizer is not None:
            self._attr_method_list.append(self._build_normalizer)

    def _build_normalizer(self) -> Tuple[str, str]:
        # Mapping entry: "normalizer": <str>.
        return "normalizer", self._normalizer


class CoerceAttribute(_Attribute):
    """
    Coerce attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/coerce.html

    Try to convert strings to numbers and truncate fractions for integers.
    Accepts true (default) and false.

    :param coerce: whether to coerce dirty values
    """

    def __init__(self, coerce: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._coerce: bool = coerce
        # Only register the builder when the user supplied an explicit value.
        if coerce is not None:
            self._attr_method_list.append(self._build_coerce)

    def _build_coerce(self) -> Tuple[str, bool]:
        # Mapping entry: "coerce": <bool>.
        return "coerce", self._coerce


class IgnoreMalformedAttribute(_Attribute):
    """
    Ignore-malformed attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/ignore-malformed.html

    If true, malformed numbers are ignored. If false (default), malformed
    numbers throw an exception and reject the whole document. Note that this
    cannot be set if the script parameter is used.

    :param ignore_malformed: whether to ignore malformed values
    """

    def __init__(self, ignore_malformed: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ignore_malformed: bool = ignore_malformed
        # Only register the builder when the user supplied an explicit value.
        if ignore_malformed is not None:
            self._attr_method_list.append(self._build_ignore_malformed)

    def _build_ignore_malformed(self) -> Tuple[str, bool]:
        # Mapping entry: "ignore_malformed": <bool>.
        return "ignore_malformed", self._ignore_malformed


class TimeSeriesMetricAttribute(_Attribute):
    """
    Time-series-metric attribute.

    For internal use by Elastic only.
    Marks the field as a time series metric. The value is the metric type.
    Defaults to null (not a time series metric). For aggregate_metric_double
    fields, this parameter accepts counter, gauge, and summary. You can't
    update this parameter for existing fields.

    :param time_series_metric: metric type as a string or ``TimeSeriesMetric``
    """

    def __init__(self, time_series_metric: Union[str, TimeSeriesMetric] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._time_series_metric: Union[str, TimeSeriesMetric] = time_series_metric
        # Only register the builder when the user supplied an explicit value.
        if time_series_metric is not None:
            self._attr_method_list.append(self._build_time_series_metric)

    def _build_time_series_metric(self) -> Tuple[str, str]:
        # Accept either a raw string or an enum member; normalize to a string.
        value = self._time_series_metric
        if not isinstance(value, str):
            value = value.value
        return "time_series_metric", value


class ScalingFactorAttribute(_Attribute):
    """
    Scaling-factor attribute.

    :param scaling_factor: scaling factor applied by the field
    """

    def __init__(self, scaling_factor: float = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._scaling_factor: float = scaling_factor
        # Only register the builder when the user supplied an explicit value.
        if scaling_factor is not None:
            self._attr_method_list.append(self._build_scaling_factor)

    def _build_scaling_factor(self) -> Tuple[str, float]:
        # Mapping entry: "scaling_factor": <float>.
        return "scaling_factor", self._scaling_factor


class DynamicAttribute(_Attribute):
    """
    Dynamic attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/dynamic.html

    Whether or not new properties should be added dynamically to an existing
    nested object. Accepts true (default), false and strict.

    :param dynamic: a bool, a string, or a ``Dynamic`` enum member
    """

    def __init__(self, dynamic: Union[str, bool, Dynamic] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._dynamic: Union[str, bool, Dynamic] = dynamic
        # Only register the builder when the user supplied an explicit value.
        if dynamic is not None:
            self._attr_method_list.append(self._build_dynamic)

    def _build_dynamic(self) -> Tuple[str, Union[str, bool]]:
        # Unwrap an enum member to its raw value; pass strings/bools through.
        value = self._dynamic
        if isinstance(value, Dynamic):
            value = value.value
        return "dynamic", value


class EnabledAttribute(_Attribute):
    """
    Enabled attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/enabled.html

    Whether the JSON value given for the object field should be parsed and
    indexed (true, default) or completely ignored (false).

    :param enabled: whether the field is parsed and indexed
    """

    def __init__(self, enabled: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._enabled: bool = enabled
        # Only register the builder when the user supplied an explicit value.
        if enabled is not None:
            self._attr_method_list.append(self._build_enabled)

    def _build_enabled(self) -> Tuple[str, bool]:
        # Mapping entry: "enabled": <bool>.
        return "enabled", self._enabled


class PropertiesAttribute(_Attribute):
    """
    Properties attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/properties.html

    The fields within the nested object, which can be of any data type,
    including nested. New properties may be added to an existing nested object.

    :param properties: sub-field definitions, either a name -> mapping dict or
        an iterable of field objects
    """

    def __init__(
            self, properties: Union[Dict[str, Union[Dict, _BaseField]], Iterable[_BaseField]] = None, *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self._properties: Union[Dict[str, Union[Dict, _BaseField]], Iterable[_BaseField]] = properties
        if properties is None:
            return
        self._attr_method_list.append(self._build_properties)
        # NOTE: check Dict before Iterable — a dict is itself iterable.
        if isinstance(properties, Dict):
            # Expose each property name as an attribute for dotted access.
            for key in properties.keys():
                setattr(self, key, key)
        elif isinstance(properties, Iterable):
            for field in properties:
                setattr(self, field._field_name, field)
                if hasattr(field, "_set_parent_field"):
                    field._set_parent_field(self)
        else:
            raise ESPCInvalidTypeError("属性类型错误")

    def _build_properties(self) -> Tuple[str, Dict]:
        # Dict values may already be raw mapping dicts; field objects are
        # asked to build themselves.
        if isinstance(self._properties, Dict):
            built = {k: v if isinstance(v, Dict) else v._build() for k, v in self._properties.items()}
        else:
            built = {p._field_name: p._build() for p in self._properties}
        return "properties", built


class DepthLimitAttribute(_Attribute):
    """
    Depth-limit attribute.

    The maximum allowed depth of the flattened object field, in terms of
    nested inner objects. If a flattened object field exceeds this limit, then
    an error will be thrown. Defaults to 20. Note that depth_limit can be
    updated dynamically through the update mapping API.

    :param depth_limit: maximum allowed nesting depth
    """

    def __init__(self, depth_limit: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._depth_limit: int = depth_limit
        # Only register the builder when the user supplied an explicit value.
        if depth_limit is not None:
            self._attr_method_list.append(self._build_depth_limit)

    def _build_depth_limit(self) -> Tuple[str, int]:
        # Mapping entry: "depth_limit": <int>.
        return "depth_limit", self._depth_limit


class SplitQueriesOnWhitespaceAttribute(_Attribute):
    """
    Split-queries-on-whitespace attribute.

    Whether full text queries should split the input on whitespace when
    building a query for this field. Accepts true or false (default).

    :param split_queries_on_whitespace: whether to split query input on whitespace
    """

    def __init__(self, split_queries_on_whitespace: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._split_queries_on_whitespace: bool = split_queries_on_whitespace
        # Only register the builder when the user supplied an explicit value.
        if split_queries_on_whitespace is not None:
            self._attr_method_list.append(self._build_split_queries_on_whitespace)

    def _build_split_queries_on_whitespace(self) -> Tuple[str, bool]:
        # Mapping entry: "split_queries_on_whitespace": <bool>.
        return "split_queries_on_whitespace", self._split_queries_on_whitespace


class IncludeInParentAttribute(_Attribute):
    """
    Include-in-parent attribute.

    If true, all fields in the nested object are also added to the parent
    document as standard (flat) fields. Defaults to false.

    :param include_in_parent: whether to flatten fields into the parent document
    """

    def __init__(self, include_in_parent: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._include_in_parent: bool = include_in_parent
        # Only register the builder when the user supplied an explicit value.
        if include_in_parent is not None:
            self._attr_method_list.append(self._build_include_in_parent)

    def _build_include_in_parent(self) -> Tuple[str, bool]:
        # Mapping entry: "include_in_parent": <bool>.
        return "include_in_parent", self._include_in_parent


class IncludeInRootAttribute(_Attribute):
    """
    Include-in-root attribute.

    If true, all fields in the nested object are also added to the root
    document as standard (flat) fields. Defaults to false.

    :param include_in_root: whether to flatten fields into the root document
    """

    def __init__(self, include_in_root: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._include_in_root: bool = include_in_root
        # Only register the builder when the user supplied an explicit value.
        if include_in_root is not None:
            self._attr_method_list.append(self._build_include_in_root)

    def _build_include_in_root(self) -> Tuple[str, bool]:
        # Mapping entry: "include_in_root": <bool>.
        return "include_in_root", self._include_in_root


class PathAttribute(_Attribute):
    """
    Path attribute.

    :param path: target path, either a raw string or a field object whose
        name is used
    """

    def __init__(self, path: Union[str, _BaseField] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._path: Union[str, _BaseField] = path
        # Only register the builder when the user supplied an explicit value.
        if path is not None:
            self._attr_method_list.append(self._build_path)

    def _build_path(self) -> Tuple[str, str]:
        # A field object contributes its field name; strings pass through.
        if isinstance(self._path, _BaseField):
            return "path", self._path._field_name
        return "path", self._path


class FormatAttribute(_Attribute):
    """
    Date-format attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-date-format.html

    The date format(s) that can be parsed. Defaults to
    strict_date_optional_time||epoch_millis.

    :param format_: a format string, a ``TimeFormat`` member, or a list of
        either (joined with "||")
    """

    def __init__(self, format_: Union[str, TimeFormat, List[Union[str, TimeFormat]]] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._format: Union[str, TimeFormat, List[Union[str, TimeFormat]]] = format_
        # Only register the builder when the user supplied an explicit value.
        if format_ is not None:
            self._attr_method_list.append(self._build_format)

    def _build_format(self) -> Tuple[str, str]:
        fmt = self._format
        if isinstance(fmt, str):
            return "format", fmt
        if isinstance(fmt, TimeFormat):
            return "format", fmt.value
        # A list of formats is joined with "||", Elasticsearch's multi-format
        # separator; each element may be a raw string or a TimeFormat member.
        parts = [tf.value if isinstance(tf, TimeFormat) else tf for tf in fmt]
        return "format", "||".join(parts)


class LocaleAttribute(_Attribute):
    """
    Locale attribute.

    The locale to use when parsing dates since months do not have the same
    names and/or abbreviations in all languages. The default is the ROOT
    locale.

    :param locale: locale identifier used when parsing dates
    """

    def __init__(self, locale: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._locale: str = locale
        # Only register the builder when the user supplied an explicit value.
        if locale is not None:
            self._attr_method_list.append(self._build_locale)

    def _build_locale(self) -> Tuple[str, str]:
        # Mapping entry: "locale": <str>.
        return "locale", self._locale


class MetricsAttribute(_Attribute):
    """
    Metrics attribute.

    Array of metric sub-fields to store. Each value corresponds to a metric
    aggregation. Valid values are min, max, sum, and value_count. You must
    specify at least one value.

    :param metrics: metric names as strings or ``Metrics`` members
    """

    def __init__(self, metrics: List[Union[str, Metrics]] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._metrics: List[Union[str, Metrics]] = metrics
        # Register only for a non-empty list (an empty metric list would be
        # invalid in the mapping anyway).
        if self._metrics:
            self._attr_method_list.append(self._build_metrics)

    def _build_metrics(self) -> Tuple[str, List[str]]:
        # Normalize each entry (string or enum member) to a plain string.
        names = []
        for metric in self._metrics:
            names.append(metric.value if isinstance(metric, Metrics) else metric)
        return "metrics", names


class DefaultMetricsAttribute(_Attribute):
    """
    Default-metric attribute.

    Default metric sub-field to use for queries, scripts, and aggregations
    that don't use a sub-field. Must be a value from the metrics array.

    :param default_metric: metric name as a string or a ``Metrics`` member
    """

    def __init__(self, default_metric: Union[str, Metrics] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._default_metric: Union[str, Metrics] = default_metric
        # Only register the builder when the user supplied an explicit value.
        if default_metric is not None:
            self._attr_method_list.append(self._build_default_metric)

    def _build_default_metric(self) -> Tuple[str, str]:
        # Unwrap an enum member to its raw value; strings pass through.
        value = self._default_metric
        if isinstance(value, Metrics):
            value = value.value
        return "default_metric", value


class AnalyzerAttribute(_Attribute):
    """
    Analyzer attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analyzer.html

    The analyzer which should be used for the text field, both at index-time
    and at search-time (unless overridden by the search_analyzer). Defaults to
    the default index analyzer, or the standard analyzer.

    :param analyzer: analyzer name

    TODO: revisit once a dedicated analyzer class is implemented.
    """

    def __init__(self, analyzer: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._analyzer: str = analyzer
        # Only register the builder when the user supplied an explicit value.
        if analyzer is not None:
            self._attr_method_list.append(self._build_analyzer)

    def _build_analyzer(self) -> Tuple[str, str]:
        # Mapping entry: "analyzer": <str>.
        return "analyzer", self._analyzer


class FielddataAttribute(_Attribute):
    """
    Fielddata attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/fielddata.html

    Can the field use in-memory fielddata for sorting, aggregations, or
    scripting? Accepts true or false (default).

    :param fielddata: whether in-memory fielddata is enabled

    TODO: research fielddata behaviour in more depth.
    """

    def __init__(self, fielddata: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._fielddata: bool = fielddata
        # Only register the builder when the user supplied an explicit value.
        if fielddata is not None:
            self._attr_method_list.append(self._build_fielddata)

    def _build_fielddata(self) -> Tuple[str, bool]:
        # Mapping entry: "fielddata": <bool>.
        return "fielddata", self._fielddata


class FielddataFrequencyFilterAttribute(_Attribute):
    """
    Fielddata-frequency-filter attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/text.html#field-data-filtering

    Expert settings which allow to decide which values to load in memory when
    fielddata is enabled. By default all values are loaded.

    :param fielddata_frequency_filter: a raw dict or a
        ``FieldDataFrequencyFilter`` object

    TODO: research fielddata_frequency_filter in more depth.
    """

    def __init__(
            self, fielddata_frequency_filter: Union[Dict[str, Union[int, float]], FieldDataFrequencyFilter] = None,
            *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self._fielddata_frequency_filter: Union[Dict[str, Union[int, float]], FieldDataFrequencyFilter] = (
            fielddata_frequency_filter)
        # Register only for a truthy value (None and empty dicts are skipped).
        if self._fielddata_frequency_filter:
            self._attr_method_list.append(self._build_fielddata_frequency_filter)

    def _build_fielddata_frequency_filter(self) -> Tuple[str, Dict[str, Union[int, float]]]:
        # A filter object builds its own dict; raw dicts pass through.
        value = self._fielddata_frequency_filter
        if isinstance(value, FieldDataFrequencyFilter):
            value = value._build()
        return "fielddata_frequency_filter", value


class IndexPrefixesAttribute(_Attribute):
    """
    Index-prefixes attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/index-prefixes.html

    If enabled, term prefixes of between 2 and 5 characters are indexed into a
    separate field. This allows prefix searches to run more efficiently, at
    the expense of a larger index.

    :param index_prefixes: a raw dict or an ``IndexPrefixes`` object
    """

    def __init__(
            self, index_prefixes: Union[Dict[str, float], IndexPrefixes] = None,
            *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self._index_prefixes: Union[Dict[str, float], IndexPrefixes] = index_prefixes
        # Register only for a truthy value (None and empty dicts are skipped).
        if self._index_prefixes:
            self._attr_method_list.append(self._build_index_prefixes)

    def _build_index_prefixes(self) -> Tuple[str, Dict[str, float]]:
        # An IndexPrefixes object builds its own dict; raw dicts pass through.
        value = self._index_prefixes
        if isinstance(value, IndexPrefixes):
            value = value._build()
        return "index_prefixes", value


class IndexPhrasesAttribute(_Attribute):
    """
    Index-phrases attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/index-phrases.html

    If enabled, two-term word combinations (shingles) are indexed into a
    separate field. This allows exact phrase queries (no slop) to run more
    efficiently, at the expense of a larger index. Note that this works best
    when stopwords are not removed, as phrases containing stopwords will not
    use the subsidiary field and will fall back to a standard phrase query.
    Accepts true or false (default).

    :param index_phrases: whether to index two-term shingles separately
    """

    def __init__(self, index_phrases: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._index_phrases: bool = index_phrases
        # BUG FIX: the original used a truthiness check (`if self._index_phrases:`),
        # so an explicit `index_phrases=False` was silently dropped from the
        # mapping. Test against None instead, matching every other boolean
        # attribute in this module (e.g. StoreAttribute, NormsAttribute).
        if index_phrases is not None:
            self._attr_method_list.append(self._build_index_phrases)

    def _build_index_phrases(self) -> Tuple[str, bool]:
        # Mapping entry: "index_phrases": <bool>.
        return "index_phrases", self._index_phrases


class PositionIncrementGapAttribute(_Attribute):
    """
    Fake-term position gap attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/position-increment-gap.html

    The number of fake term positions which should be inserted between each
    element of an array of strings. Defaults to the position_increment_gap
    configured on the analyzer, which defaults to 100. 100 was chosen because
    it prevents phrase queries with reasonably large slops (less than 100)
    from matching terms across field values.

    :param position_increment_gap: number of fake term positions inserted
        between the elements of a string array (0 is a valid value)
    """

    def __init__(self, position_increment_gap: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._position_increment_gap: int = position_increment_gap
        # `is not None` (not truthiness): 0 is a legitimate gap value and the
        # previous check silently dropped it from the mapping.
        if self._position_increment_gap is not None:
            self._attr_method_list.append(self._build_position_increment_gap)
        return

    def _build_position_increment_gap(self) -> Tuple[str, int]:
        return "position_increment_gap", self._position_increment_gap


class SearchAnalyzerAttribute(_Attribute):
    """
    Search-time analyzer attribute.
    TODO: implement dedicated analyzer classes.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-analyzer.html

    The analyzer that should be used at search time when a phrase is
    encountered. Defaults to the search_analyzer setting.

    :param search_analyzer: name of the analyzer applied at search time
    """

    def __init__(self, search_analyzer: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._search_analyzer: str = search_analyzer
        if search_analyzer is not None:
            self._attr_method_list.append(self._build_search_analyzer)

    def _build_search_analyzer(self) -> Tuple[str, str]:
        # Key/value pair emitted into the field mapping.
        return "search_analyzer", self._search_analyzer


class SearchQuoteAnalyzerAttribute(_Attribute):
    """
    Search-time phrase analyzer attribute.
    TODO: implement dedicated analyzer classes.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analyzer.html#search-quote-analyzer

    The analyzer that should be used at search time when a phrase is
    encountered. Defaults to the search_analyzer setting.

    :param search_quote_analyzer: name of the analyzer applied to quoted phrases
    """

    def __init__(self, search_quote_analyzer: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._search_quote_analyzer: str = search_quote_analyzer
        if search_quote_analyzer is not None:
            self._attr_method_list.append(self._build_search_quote_analyzer)

    def _build_search_quote_analyzer(self) -> Tuple[str, str]:
        # Key/value pair emitted into the field mapping.
        return "search_quote_analyzer", self._search_quote_analyzer


class TermVectorAttribute(_Attribute):
    """
    Term-vector storage attribute.

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/term-vector.html

    Whether term vectors should be stored for the field. Defaults to no.

    :param term_vector: term-vector mode, as a raw string or a TermVector enum
    """

    def __init__(self, term_vector: Union[str, TermVector] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._term_vector: Union[str, TermVector] = term_vector
        if term_vector is not None:
            self._attr_method_list.append(self._build_term_vector)

    def _build_term_vector(self) -> Tuple[str, str]:
        # Unwrap the enum to its raw string; plain strings pass through.
        value = self._term_vector
        if isinstance(value, TermVector):
            value = value.value
        return "term_vector", value


class PreserveSeparatorsAttribute(_Attribute):
    """
    Separator-preservation attribute.

    Preserves the separators, defaults to true. If disabled, you could find a
    field starting with "Foo Fighters" when suggesting for "foof".

    :param preserve_separators: whether separators are preserved
    """

    def __init__(self, preserve_separators: Union[str, bool] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._preserve_separators: Union[str, bool] = preserve_separators
        if preserve_separators is not None:
            self._attr_method_list.append(self._build_preserve_separators)

    def _build_preserve_separators(self) -> Tuple[str, Union[str, bool]]:
        return "preserve_separators", self._preserve_separators


class PreservePositionIncrementsAttribute(_Attribute):
    """
    Position-increment preservation attribute.

    Enables position increments, defaults to true. If disabled and using a
    stopwords analyzer, you could get a field starting with "The Beatles" when
    suggesting for "b". Note: you could also achieve this by indexing two
    inputs, "Beatles" and "The Beatles" — no need to change a simple analyzer,
    if you are able to enrich your data.

    :param preserve_position_increments: whether position increments are enabled
    """

    def __init__(self, preserve_position_increments: Union[str, bool] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._preserve_position_increments: Union[str, bool] = preserve_position_increments
        if preserve_position_increments is not None:
            self._attr_method_list.append(self._build_preserve_position_increments)

    def _build_preserve_position_increments(self) -> Tuple[str, Union[str, bool]]:
        return "preserve_position_increments", self._preserve_position_increments


class MaxInputLengthAttribute(_Attribute):
    """
    Single-input length limit attribute.

    Limits the length of a single input, defaults to 50 UTF-16 code points.
    This limit is only used at index time to reduce the total number of
    characters per input string, in order to prevent massive inputs from
    bloating the underlying data structure. Most use cases won't be influenced
    by the default value, since prefix completions seldom grow beyond prefixes
    longer than a handful of characters.

    :param max_input_length: maximum length of a single input
    """

    def __init__(self, max_input_length: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._max_input_length: int = max_input_length
        if max_input_length is not None:
            self._attr_method_list.append(self._build_max_input_length)

    def _build_max_input_length(self) -> Tuple[str, int]:
        return "max_input_length", self._max_input_length


class EnablePositionIncrementsAttribute(_Attribute):
    """
    Position-increment counting attribute.

    Indicates if position increments should be counted. Set to false if you
    don't want to count tokens removed by analyzer filters (like stop).
    Defaults to true.

    :param enable_position_increments: whether position increments are counted
    """

    def __init__(self, enable_position_increments: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._enable_position_increments: bool = enable_position_increments
        if enable_position_increments is not None:
            self._attr_method_list.append(self._build_enable_position_increments)

    def _build_enable_position_increments(self) -> Tuple[str, bool]:
        return "enable_position_increments", self._enable_position_increments


class MaxShingleSizeAttribute(_Attribute):
    """
    Maximum shingle size attribute (search_as_you_type).

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-as-you-type.html

    Largest shingle size to create. Valid values are 2 (inclusive) to 4
    (inclusive). Defaults to 3. A subfield is created for each integer between
    2 and this value. For example, a value of 3 creates two subfields:
    my_field._2gram and my_field._3gram. More subfields enable more specific
    queries but increase index size.

    :param max_shingle_size: largest shingle size, 2..4 inclusive;
        out-of-range values are discarded so Elasticsearch falls back to
        its default of 3
    """

    def __init__(self, max_shingle_size: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Explicit None check: the previous truthiness test skipped range
        # validation for max_shingle_size=0, letting an invalid value into
        # the generated mapping.
        if max_shingle_size is not None and not 2 <= max_shingle_size <= 4:
            # NOTE(review): invalid values are silently discarded rather than
            # raising ESPCInvalidValueRangeError; kept as best-effort behavior.
            max_shingle_size = None
        self._max_shingle_size: int = max_shingle_size
        if self._max_shingle_size is not None:
            self._attr_method_list.append(self._build_max_shingle_size)
        return

    def _build_max_shingle_size(self) -> Tuple[str, int]:
        return "max_shingle_size", self._max_shingle_size


class DimsAttribute(_Attribute):
    """
    Vector dimensions attribute.

    The number of dimensions in the vector; required parameter.

    :param dims: number of dimensions in the vector (mandatory)
    :raises ESPCInvalidValueRangeError: if dims is not supplied
    """

    def __init__(self, dims: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if dims is None:
            # dims is mandatory; fail fast instead of emitting a broken mapping.
            raise ESPCInvalidValueRangeError(f"必须填值。")
        self._dims: int = dims
        # dims is guaranteed non-None past the guard above, so register the
        # builder unconditionally (the old `is not None` check was dead code).
        self._attr_method_list.append(self._build_dims)
        return

    def _build_dims(self) -> Tuple[str, int]:
        return "dims", self._dims


class PositiveScoreImpactAttribute(_Attribute):
    """
    Positive score impact attribute.

    :param positive_score_impact: whether the field impacts the score positively
    """

    def __init__(self, positive_score_impact: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._positive_score_impact: bool = positive_score_impact
        if positive_score_impact is not None:
            self._attr_method_list.append(self._build_positive_score_impact)

    def _build_positive_score_impact(self) -> Tuple[str, bool]:
        return "positive_score_impact", self._positive_score_impact


class IgnoreZValueAttribute(_Attribute):
    """
    Third-dimension handling attribute.

    If true (default), three-dimensional points will be accepted (stored in
    source) but only latitude and longitude values will be indexed; the third
    dimension is ignored. If false, geopoints containing anything more than
    latitude and longitude (two dimensions) throw an exception and reject the
    whole document. Note that this cannot be set if the script parameter is
    used.

    :param ignore_z_value: whether the third dimension of a point is ignored
    """

    def __init__(self, ignore_z_value: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ignore_z_value: bool = ignore_z_value
        if ignore_z_value is not None:
            self._attr_method_list.append(self._build_ignore_z_value)

    def _build_ignore_z_value(self) -> Tuple[str, bool]:
        return "ignore_z_value", self._ignore_z_value


class OrientationAttribute(_Attribute):
    """
    Vertex-order attribute.

    Optionally defines how to interpret vertex order for polygons /
    multipolygons. This parameter defines one of two coordinate-system rules
    (right-hand or left-hand), each of which can be specified in three ways:
        1. Right-hand rule: right, ccw, counterclockwise.
        2. Left-hand rule: left, cw, clockwise.
    The default orientation (counterclockwise) complies with the OGC standard,
    which defines outer-ring vertices in counterclockwise order with
    inner-ring (hole) vertices in clockwise order. Setting this parameter in
    the geo_shape mapping explicitly sets vertex order for the coordinate list
    of a geo_shape field, but can be overridden in each individual GeoJSON or
    WKT document.

    :param orientation: vertex order, as a raw string or an Orientation enum
    """

    def __init__(self, orientation: Union[str, Orientation] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._orientation: Union[str, Orientation] = orientation
        if orientation is not None:
            self._attr_method_list.append(self._build_orientation)

    def _build_orientation(self) -> Tuple[str, str]:
        # Unwrap the enum to its raw string; plain strings pass through.
        value = self._orientation
        if isinstance(value, Orientation):
            value = value.value
        return "orientation", value


class TreeAttribute(_Attribute):
    """
    Prefix-tree implementation attribute.

    [6.6] Deprecated in 6.6. PrefixTrees no longer used.
    Name of the PrefixTree implementation to be used: geohash for
    GeohashPrefixTree and quadtree for QuadPrefixTree. Note: this parameter is
    only relevant for term and recursive strategies.

    :param tree: tree implementation, as a raw string or a PrefixTree enum
    """

    def __init__(self, tree: Union[str, PrefixTree] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._tree: Union[str, PrefixTree] = tree
        if tree is not None:
            self._attr_method_list.append(self._build_tree)

    def _build_tree(self) -> Tuple[str, str]:
        # Unwrap the enum to its raw string; plain strings pass through.
        value = self._tree
        if isinstance(value, PrefixTree):
            value = value.value
        return "tree", value


class PrecisionAttribute(_Attribute):
    """
    Precision attribute.

    [6.6] Deprecated in 6.6. PrefixTrees no longer used.
    This parameter may be used instead of tree_levels to set an appropriate
    value for the tree_levels parameter. The value specifies the desired
    precision, and Elasticsearch will calculate the best tree_levels value to
    honor this precision. The value should be a number followed by an optional
    distance unit. Valid distance units include: in, inch, yd, yard, mi,
    miles, km, kilometers, m, meters, cm, centimeters, mm, millimeters. Note:
    this parameter is only relevant for term and recursive strategies.

    :param precision: desired precision, as a raw string or a Precision enum
    """

    def __init__(self, precision: Union[str, Precision] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._precision: Union[str, Precision] = precision
        if precision is not None:
            self._attr_method_list.append(self._build_precision)

    def _build_precision(self) -> Tuple[str, str]:
        # Unwrap the enum to its raw string; plain strings pass through.
        value = self._precision
        if isinstance(value, Precision):
            value = value.value
        return "precision", value


class TreeLevelsAttribute(_Attribute):
    """
    Tree-depth attribute.

    [6.6] Deprecated in 6.6. PrefixTrees no longer used.
    Maximum number of layers to be used by the PrefixTree. This can be used to
    control the precision of shape representations and therefore how many
    terms are indexed. Defaults to the default value of the chosen PrefixTree
    implementation. Since this parameter requires a certain level of
    understanding of the underlying implementation, users may use the
    precision parameter instead. However, Elasticsearch only uses the
    tree_levels parameter internally, and this is what is returned via the
    mapping API even if you use the precision parameter. Note: this parameter
    is only relevant for term and recursive strategies.

    :param tree_levels: maximum number of prefix-tree layers
    """

    def __init__(self, tree_levels: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._tree_levels: int = tree_levels
        if tree_levels is not None:
            self._attr_method_list.append(self._build_tree_levels)

    def _build_tree_levels(self) -> Tuple[str, int]:
        return "tree_levels", self._tree_levels


class StrategyAttribute(_Attribute):
    """
    Shape-representation strategy attribute.

    [6.6] Deprecated in 6.6. PrefixTrees no longer used.
    The strategy parameter defines the approach for how to represent shapes at
    indexing and search time. It also influences the capabilities available,
    so it is recommended to let Elasticsearch set this parameter
    automatically. There are two strategies available: recursive and term.
    Recursive and term strategies are deprecated and will be removed in a
    future version. While they are still available, the term strategy supports
    point types only (the points_only parameter will be automatically set to
    true) while the recursive strategy supports all shape types. (IMPORTANT:
    see prefix trees for more detailed information about these strategies.)

    :param strategy: strategy, as a raw string or a Strategy enum
    """

    def __init__(self, strategy: Union[str, Strategy] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._strategy: Union[str, Strategy] = strategy
        if strategy is not None:
            self._attr_method_list.append(self._build_strategy)

    def _build_strategy(self) -> Tuple[str, str]:
        # Unwrap the enum to its raw string; plain strings pass through.
        value = self._strategy
        if isinstance(value, Strategy):
            value = value.value
        return "strategy", value


class DistanceErrorPctAttribute(_Attribute):
    """
    Distance error percentage attribute.

    [6.6] Deprecated in 6.6. PrefixTrees no longer used.
    Used as a hint to the PrefixTree about how precise it should be. Defaults
    to 0.025 (2.5%) with 0.5 as the maximum supported value. PERFORMANCE NOTE:
    this value will default to 0 if a precision or tree_level definition is
    explicitly defined. This guarantees spatial precision at the level defined
    in the mapping, which can lead to significant memory usage for
    high-resolution shapes with low error (e.g., large shapes at 1m with
    < 0.001 error). To improve indexing performance (at the cost of query
    accuracy) explicitly define tree_level or precision along with a
    reasonable distance_error_pct, noting that large shapes will have greater
    false positives. Note: this parameter is only relevant for term and
    recursive strategies.

    :param distance_error_pct: precision hint for the PrefixTree
    """

    def __init__(self, distance_error_pct: float = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._distance_error_pct: float = distance_error_pct
        if distance_error_pct is not None:
            self._attr_method_list.append(self._build_distance_error_pct)

    def _build_distance_error_pct(self) -> Tuple[str, float]:
        return "distance_error_pct", self._distance_error_pct


class PointsOnlyAttribute(_Attribute):
    """
    Points-only attribute.

    [6.6] Deprecated in 6.6. PrefixTrees no longer used.
    Setting this option to true (defaults to false) configures the geo_shape
    field type for point shapes only (NOTE: multi-points are not yet
    supported). This optimizes index and search performance for the geohash
    and quadtree when it is known that only points will be indexed. At present
    geo_shape queries cannot be executed on geo_point field types; this option
    bridges the gap by improving point performance on a geo_shape field so
    that geo_shape queries are optimal on a points-only field.

    :param points_only: whether the field is configured for point shapes only
    """

    def __init__(self, points_only: bool = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._points_only: bool = points_only
        if points_only is not None:
            self._attr_method_list.append(self._build_points_only)

    def _build_points_only(self) -> Tuple[str, bool]:
        return "points_only", self._points_only














