"""
@author: 江同学呀
@file: bucket_aggregation.py
@date: 2024/7/25 11:54
@desc:
    存储桶聚合
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket.html

    Bucket aggregations don’t calculate metrics over fields like the metrics aggregations do, but instead, they create
    buckets of documents. Each bucket is associated with a criterion (depending on the aggregation type) which
    determines whether or not a document in the current context "falls" into it. In other words, the buckets
    effectively define document sets. In addition to the buckets themselves, the bucket aggregations also compute and
    return the number of documents that "fell into" each bucket.
    Bucket aggregations, as opposed to metrics aggregations, can hold sub-aggregations. These sub-aggregations will be
    aggregated for the buckets created by their "parent" bucket aggregation.
    There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket,
    some define fixed number of multiple buckets, and others dynamically create the buckets during the aggregation
    process.
    存储桶聚合不会像指标聚合那样计算字段的指标，而是创建文档存储桶。每个存储桶都与一个标准（取决于聚合类型）相关联，该标准确定当前上下文中的文档
    是否“落入”其中。换句话说，存储桶有效地定义了文档集。除了存储桶本身之外，存储桶聚合还计算并返回“落入”每个存储桶的文档数。
    与指标聚合相反，存储桶聚合可以保存子聚合。这些子聚合将针对其 “父” 存储桶聚合创建的存储桶进行聚合。
    有不同的存储桶聚合器，每个聚合器都有不同的 “bucketing” 策略。有些定义单个存储桶，有些定义多个存储桶的固定数量，而另一些则在聚合过程中动态创建存储桶。

    todo
"""
from typing import Dict, Any, List, Union, TYPE_CHECKING, Iterable, Literal, Type

from espc.common.common import Number
from espc.common.field_common import TimeFormat
from espc.orm.model.base.base import _Base

from espc.common.agg_common import CollectMode, ExecutionHint, MinimumInterval, OrderType, CalendarInterval, \
    FixedInterval
from espc.orm.model.struct.geo import PointType, Bounds
from espc.orm.model.mapping.field.base_field.base_geo_field import _BaseGeoField
from espc.orm.model.mapping.field.base_field.base_time_field import _BaseTimeField
from espc.orm.model.struct.aggs import Order, Range as _Range, IPRanges
from espc.orm.model.dsl.queries.base_queries import _BaseQueries
from espc.orm.model.mapping.field.base_field.base_field import _BaseField
from espc.orm.model.aggregation.aggregation import Aggregation
from espc.orm.model.text_analysis.analyzer import BaseAnalyzer


class AdjacencyMatrix(Aggregation):
    """
    Adjacency matrix aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-adjacency-matrix-aggregation.html

    A bucket aggregation returning a form of adjacency matrix. The request provides a collection of named filter
    expressions, similar to the filters aggregation request. Each bucket in the response represents a non-empty cell
    in the matrix of intersecting filters.

    :param filters:
        (Required) Filters used to create buckets. Either a mapping of filter name -> query, or a plain iterable
        of queries. When a plain iterable is given, each query's ``type`` attribute is used as its filter name;
        note that two queries of the same type then collide on one name and the later one silently wins, so prefer
        the mapping form whenever filter names must be unambiguous.
    :param separator:
        (Optional, string) Separator used to concatenate filter names. Defaults to ``&``.
    """
    type: str = "adjacency_matrix"

    def __init__(
            self, filters: Union[Dict[str, _BaseQueries], Iterable[_BaseQueries]], separator: str = None
    ):
        super().__init__()
        self._filters: Union[Dict[str, _BaseQueries], Iterable[_BaseQueries]] = filters
        self._separator: str = separator
        return

    def _build(self) -> Dict[str, Any]:
        _body: Dict[str, Any] = super()._build()
        if isinstance(self._filters, dict):
            # Caller supplied explicit, collision-free filter names.
            _body["filters"] = {_name: _filter._build() for _name, _filter in self._filters.items()}
        else:
            # Legacy behaviour: name each filter after its query type.
            _body["filters"] = {_filter.type: _filter._build() for _filter in self._filters}
        if self._separator:
            _body["separator"] = self._separator
        return _body


class AutoOverHistogram(Aggregation):
    """
    Auto-interval date histogram aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-autodatehistogram-aggregation.html

    A multi-bucket aggregation similar to the Date histogram except that instead of providing an interval to use as
    the width of each bucket, a target number of buckets is provided and the interval of the buckets is automatically
    chosen to best achieve that target. The number of buckets returned will always be less than or equal to the
    target. The ``buckets`` field is optional and defaults to 10 buckets if not specified.

    NOTE(review): the class name does not match the ES aggregation name ``auto_date_histogram``; kept as-is for
    backward compatibility with existing callers.

    :param field:
        The date field to bucket on (field object or raw field name).
    :param buckets:
        (Optional, integer) Target number of buckets. Defaults to 10 when omitted.
    :param format_:
        (Optional, string) Date format applied to the bucket key names in the response.
    :param time_zone:
        (Optional, string) Time zone used for bucketing and rounding (ISO 8601 offset or TZ database id).
    :param minimum_interval:
        (Optional) The smallest interval the auto-rounding is allowed to choose.
    :param missing:
        (Optional, string) Value to use for documents missing the field; by default such documents are ignored.
    """
    type: str = "auto_date_histogram"

    def __init__(
            self, field: Union[_BaseField, str], buckets: int = None, format_: Union[str, TimeFormat] = None,
            time_zone: str = None, missing: str = None,
            minimum_interval: Union[MinimumInterval, Literal["year", "month", "day", "hour", "minute", "second"]] = None
    ):
        super().__init__()
        self._field: _BaseField = field
        self._buckets: int = buckets
        self._format: str = format_
        self._time_zone: str = time_zone
        self._minimum_interval: Union[
            MinimumInterval,
            Literal["year", "month", "day", "hour", "minute", "second"],
            None
        ] = minimum_interval
        self._missing: str = missing
        return

    def _build(self) -> Dict[str, Any]:
        _body: Dict[str, Any] = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        if self._buckets is not None:
            _body["buckets"] = self._buckets
        if self._format:
            _body["format"] = self._format
        if self._time_zone:
            _body["time_zone"] = self._time_zone
        if self._minimum_interval:
            _body["minimum_interval"] = (
                self._minimum_interval if isinstance(self._minimum_interval, str) else self._minimum_interval.value
            )
        # Bug fix: ``missing`` was accepted in __init__ but never emitted, so it was silently dropped.
        if self._missing:
            _body["missing"] = self._missing
        return _body


class CategorizeText(Aggregation):
    """
    Categorize text aggregation (technical preview).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-categorize-text-aggregation.html

    A multi-bucket aggregation that groups semi-structured text into buckets. Each text field is re-analyzed using a
    custom analyzer; the resulting tokens are then categorized, creating buckets of similarly formatted text values.
    Works best with machine-generated text such as system logs. Only the first 100 analyzed tokens are used to
    categorize the text.

    If you have considerable memory allocated to your JVM but are receiving circuit-breaker exceptions from this
    aggregation, you may be attempting to categorize text that is poorly formatted for categorization. Consider
    adding ``categorization_filters`` or running under ``sampler``/``diversified_sampler`` to explore the created
    categories.

    :param field:
        (Required, string) The semi-structured text field to categorize.
    :param max_unique_tokens:
        (Optional, integer, default: 50) Maximum number of unique tokens at any position up to
        ``max_matched_tokens``. Must be larger than 1; max allowed value is 100. Smaller values use less memory and
        create fewer categories.
    :param max_matched_tokens:
        (Optional, integer, default: 5) Maximum number of token positions to match on before attempting to merge
        categories. Max allowed value is 100.
    :param similarity_threshold:
        (Optional, integer, default: 50) Minimum percentage of tokens that must match for text to be added to a
        category bucket. Must be between 1 and 100; larger values create narrower categories.
    :param categorization_filters:
        (Optional, array of strings) Regular expressions used to filter matching sequences out of the field values
        before categorization (e.g. to exclude SQL statements from log lines). Cannot be used together with
        ``categorization_analyzer``; for custom tokenization include equivalent ``pattern_replace`` character
        filters in the analyzer instead.
    :param categorization_analyzer:
        (Optional, object or string) How the text is analyzed and tokenized before being categorized. May be the
        name of a built-in analyzer (string), an analyzer object, or a dict with ``char_filter`` / ``tokenizer`` /
        ``filter`` keys as described in the ES documentation. Cannot be used together with
        ``categorization_filters``.
    :param shard_size:
        (Optional, integer) Number of categorization buckets to return from each shard before merging.
    :param size:
        (Optional, integer, default: 10) Number of buckets to return.
    :param min_doc_count:
        (Optional, integer) Minimum number of documents for a bucket to be returned in the results.
    :param shard_min_doc_count:
        (Optional, integer) Minimum number of documents for a bucket to be returned from a shard before merging.
    """
    type: str = "categorize_text"

    def __init__(
            self, field: Union[_BaseField, str], max_unique_tokens: int = None, max_matched_tokens: int = None,
            similarity_threshold: int = None, categorization_filters: List[str] = None,
            categorization_analyzer: Union[BaseAnalyzer, Type[BaseAnalyzer], str, Dict] = None,
            shard_size: int = None, size: int = 10, min_doc_count: int = None, shard_min_doc_count: int = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._max_unique_tokens: int = max_unique_tokens
        self._max_matched_tokens: int = max_matched_tokens
        self._similarity_threshold: int = similarity_threshold
        self._categorization_filters: List[str] = categorization_filters
        self._categorization_analyzer: Union[BaseAnalyzer, Type[BaseAnalyzer], str, Dict] = categorization_analyzer
        self._shard_size: int = shard_size
        self._size: int = size
        self._min_doc_count: int = min_doc_count
        self._shard_min_doc_count: int = shard_min_doc_count
        return

    def _build(self) -> Dict[str, Any]:
        _body: Dict[str, Any] = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
        }
        if self._max_unique_tokens is not None:
            _body["max_unique_tokens"] = self._max_unique_tokens
        if self._max_matched_tokens is not None:
            _body["max_matched_tokens"] = self._max_matched_tokens
        if self._similarity_threshold is not None:
            _body["similarity_threshold"] = self._similarity_threshold
        if self._categorization_filters:
            _body["categorization_filters"] = self._categorization_filters
        if self._categorization_analyzer:
            # isinstance must check the builtin ``dict``, not ``typing.Dict`` (deprecated alias);
            # strings and plain dicts pass through verbatim, analyzer objects are serialized.
            _body["categorization_analyzer"] = (
                self._categorization_analyzer
                if isinstance(self._categorization_analyzer, (str, dict))
                else self._categorization_analyzer._build()
            )
        if self._shard_size is not None:
            _body["shard_size"] = self._shard_size
        if self._size is not None:
            _body["size"] = self._size
        if self._min_doc_count is not None:
            _body["min_doc_count"] = self._min_doc_count
        if self._shard_min_doc_count is not None:
            _body["shard_min_doc_count"] = self._shard_min_doc_count
        return _body


class Children(Aggregation):
    """
    Children aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-children-aggregation.html

    A special single-bucket aggregation that selects child documents that have the specified type, as defined in a
    join field.

    :param type_:
        The child type that should be selected.
    """
    type: str = "children"

    def __init__(self, type_: str):
        super().__init__()
        self._type: str = type_
        return

    def _build(self) -> Dict[str, Any]:
        _body: Dict[str, Any] = {"type": self._type}
        return _body


class Composite(Aggregation):
    """
    Composite aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-composite-aggregation.html

    The composite aggregation is expensive — load test your application before deploying it in production.
    A multi-bucket aggregation that creates composite buckets from different sources. Unlike the other multi-bucket
    aggregations, the composite aggregation can paginate all buckets from a multi-level aggregation efficiently,
    similar to what scroll does for documents. The composite buckets are built from the combinations of the values
    extracted/created for each document, and each combination is considered a composite bucket.

    :param sources:
        Mapping of source name -> source aggregation (Terms, Histogram, DateHistogram or GeoTile). The insertion
        order of the mapping controls the order the keys are returned in; names must be unique.
    :param size:
        How many composite buckets should be returned. Each composite bucket counts as a single bucket. Defaults
        to 10 on the server side.
    :param after:
        The ``after`` key from a previous response, used to paginate to the next page of composite buckets.
    """
    type: str = "composite"

    def __init__(
            self, sources: Dict[str, Union["Terms", "Histogram", "DateHistogram", "GeoTile"]], size: int = None,
            after: Dict = None
    ):
        super().__init__()
        self._sources: Dict[str, Union["Terms", "Histogram", "DateHistogram", "GeoTile"]] = sources
        self._size: int = size
        self._after: Dict = after
        return

    def _build(self) -> Dict[str, Any]:
        # Bug fix: each composite source must be wrapped in its aggregation type, i.e.
        # {"my_name": {"terms": {"field": ...}}}. ``agg._build()`` returns only the inner body
        # (cf. how AdjacencyMatrix wraps filters via ``.type``), so the type key was missing.
        _body: Dict[str, Any] = {
            "sources": [{name: {agg.type: agg._build()}} for name, agg in self._sources.items()]
        }
        if self._size is not None:
            _body["size"] = self._size
        if self._after is not None:
            _body["after"] = self._after
        return _body


class DateHistogram(Aggregation):
    """
    Date histogram aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-datehistogram-aggregation.html

    A multi-bucket aggregation similar to the normal histogram, but usable only with date or date-range values.
    Because dates are represented internally in Elasticsearch as long values, the normal histogram could be used on
    dates as well, but less accurately; the main difference is that here the interval can be specified with
    date/time expressions. Time-based data requires special support because time-based intervals are not always a
    fixed length. Like the histogram, values are rounded down into the closest bucket.

    :param field:
        The date field to bucket on (field object or raw field name).
    :param calendar_interval:
        Calendar-aware interval, given as a unit name (e.g. ``month``) or a single-unit quantity (e.g. ``1M``).
        Multiple quantities such as ``2d`` are not supported.
    :param fixed_interval:
        Fixed interval — a fixed number of SI units that never deviates regardless of calendar position. Cannot
        express calendar units such as months or quarters.
    :param format_:
        Date format applied to the bucket key names in the response; defaults to the first format of the field
        mapping when omitted.
    :param time_zone:
        Time zone used for bucketing and rounding (ES stores date-times in UTC and buckets in UTC by default).
    :param offset:
        Shifts the start of each bucket by a positive (+) or negative (-) duration, e.g. ``1h`` or ``1d``.
    :param keyed:
        When true, associates a unique string key with each bucket and returns the ranges as a hash rather than an
        array.
    :param missing:
        Value to use for documents missing the field; by default such documents are ignored.
    :param order:
        Bucket sort order; supports the same order functionality as the terms aggregation. Buckets are sorted by
        key ascending by default.
    """
    type: str = "date_histogram"

    def __init__(
            self, field: Union[_BaseTimeField, _BaseField, str],
            calendar_interval: Union[
                Literal[
                    "year", "1y", "quarter", "1q", "month", "1M", "week", "1w", "day", "1d", "hour", "1h", "minute",
                    "1m"
                ],
                CalendarInterval
            ] = None,
            fixed_interval: Union[
                Literal["milliseconds", "ms", "seconds", "s", "minutes", "m", "hours", "h", "days", "d"], FixedInterval
            ] = None,
            format_: Union[str, TimeFormat] = None, time_zone: str = None, offset: str = None, keyed: bool = None,
            missing: str = None, order: Union[OrderType, str] = None
    ):
        super().__init__()
        self._field: Union[_BaseTimeField, _BaseField, str] = field
        self._calendar_interval: Union[
            Literal[
                "year", "1y", "quarter", "1q", "month", "1M", "week", "1w", "day", "1d", "hour", "1h", "minute",
                "1m"
            ],
            CalendarInterval,
            None
        ] = calendar_interval
        self._fixed_interval: Union[
            Literal["milliseconds", "ms", "seconds", "s", "minutes", "m", "hours", "h", "days", "d"],
            FixedInterval,
            None
        ] = fixed_interval
        self._format: Union[str, TimeFormat] = format_
        self._time_zone: str = time_zone
        self._offset: str = offset
        self._keyed: bool = keyed
        self._missing: str = missing
        self._order: Union[OrderType, str] = order
        return

    def _build(self) -> Dict:
        # Resolve the field name once, whether given as a raw string or a field object.
        _field_name = self._field if isinstance(self._field, str) else self._field._field_name
        _body: Dict = {"field": _field_name}
        _calendar = self._calendar_interval
        if _calendar:
            # Enum members are serialized via .value; plain strings pass through.
            _body["calendar_interval"] = _calendar if isinstance(_calendar, str) else _calendar.value
        _fixed = self._fixed_interval
        if _fixed:
            _body["fixed_interval"] = _fixed if isinstance(_fixed, str) else _fixed.value
        _format = self._format
        if _format:
            _body["format"] = _format if isinstance(_format, str) else _format.value
        if self._time_zone:
            _body["time_zone"] = self._time_zone
        if self._offset:
            _body["offset"] = self._offset
        # ``keyed`` is a tri-state flag: None means "not set", so False must still be emitted.
        if self._keyed is not None:
            _body["keyed"] = self._keyed
        if self._missing:
            _body["missing"] = self._missing
        _order = self._order
        if _order:
            _body["order"] = _order if isinstance(_order, str) else _order.value
        return _body


class DateRange(Aggregation):
    """
    Date range aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-daterange-aggregation.html

    A range aggregation dedicated to date values. The main difference from the normal range aggregation is that the
    ``from`` and ``to`` values can be expressed in Date Math expressions, and a date format can be specified for the
    ``from``/``to`` response fields. Note that this aggregation includes the ``from`` value and excludes the ``to``
    value for each range.

    :param field:
        The date field to aggregate on (field object or raw field name).
    :param format_:
        Date format applied to the ``from``/``to`` response fields.
    :param ranges:
        The ranges to bucket on, each either a dict with ``key``/``from``/``to`` entries or a ``Range`` object.
    :param missing:
        Value to use for documents missing the field; by default such documents are ignored.
    :param time_zone:
        Time zone used to convert dates to UTC — an ISO 8601 UTC offset (e.g. ``+01:00``) or a TZ database id.
        Also applied to rounding in date math expressions.
    :param keyed:
        When true, associates a unique string key with each bucket and returns the ranges as a hash rather than an
        array.
    """
    type: str = "date_range"

    def __init__(
            self, field: Union[_BaseTimeField, _BaseField, str], format_: Union[str, TimeFormat] = None,
            # Bug fix: the annotation was ``List[Dict[...], _Range]`` — typing.List takes exactly one
            # parameter, so evaluating that signature raised TypeError at import time.
            ranges: List[Union[Dict[Literal["key", "from", "to"], Union[str, Number]], _Range]] = None,
            missing: str = None, time_zone: str = None, keyed: bool = None
    ):
        super().__init__()
        self._field: Union[_BaseTimeField, _BaseField, str] = field
        self._format: Union[str, TimeFormat] = format_
        self._ranges: List[Union[Dict[Literal["key", "from", "to"], Union[str, Number]], _Range]] = ranges
        self._missing: str = missing
        self._time_zone: str = time_zone
        self._keyed: bool = keyed
        return

    def _build(self) -> Dict:
        _body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
        }
        if self._format:
            _body["format"] = self._format if isinstance(self._format, str) else self._format.value
        if self._ranges:
            # Plain dicts pass through verbatim; Range objects are serialized.
            _body["ranges"] = [_range if isinstance(_range, dict) else _range._build() for _range in self._ranges]
        if self._missing:
            _body["missing"] = self._missing
        if self._time_zone:
            _body["time_zone"] = self._time_zone
        if self._keyed is not None:
            _body["keyed"] = self._keyed
        return _body


class DiversifiedSampler(Aggregation):
    """
    Diversified sampler aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-diversified-sampler-aggregation.html

    Like the sampler aggregation this is a filtering aggregation used to limit any sub aggregations' processing to a
    sample of the top-scoring documents. The diversified_sampler aggregation adds the ability to limit the number of
    matches that share a common value such as an "author".

    :param field: the field whose values are de-duplicated across the sample.
    :param shard_size:
        The shard_size parameter limits how many top-scoring documents are collected in the sample processed on each
        shard. The default value is 100.
    :param max_docs_per_value:
        An optional parameter that limits how many documents are permitted per choice of de-duplicating value.
        The default setting is "1".
    :param execution_hint:
        The optional execution_hint setting can influence the management of the values used for de-duplication. Each
        option will hold up to shard_size values in memory while performing de-duplication but the type of value held
        can be controlled as follows:
            - hold field values directly (map)
            - hold ordinals of the field as determined by the Lucene index (global_ordinals)
            - hold hashes of the field values - with potential for hash collisions (bytes_hash)
        The default setting is to use global_ordinals if this information is available from the Lucene index and
        reverting to map if not. The bytes_hash setting may prove faster in some cases but introduces the possibility
        of false positives in de-duplication logic due to the possibility of hash collisions. Please note that
        Elasticsearch will ignore the choice of execution hint if it is not applicable and that there is no backward
        compatibility guarantee on these hints.
    """
    type: str = "diversified_sampler"

    def __init__(
            self, field: Union[_BaseField, str], shard_size: int = None, max_docs_per_value: int = None,
            execution_hint: Union[ExecutionHint, Literal["map", "global_ordinals", "bytes_hash"]] = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._shard_size: int = shard_size
        self._max_docs_per_value: int = max_docs_per_value
        self._execution_hint: Union[
            ExecutionHint, Literal["map", "global_ordinals", "bytes_hash"], None
        ] = execution_hint
        return

    def _build(self) -> Dict:
        """Assemble the ``diversified_sampler`` aggregation body."""
        _body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
        }
        # FIX: use ``is not None`` (rather than truthiness) so an explicit 0 is still emitted,
        # consistent with the handling of max_docs_per_value below.
        if self._shard_size is not None:
            _body["shard_size"] = self._shard_size
        if self._max_docs_per_value is not None:
            _body["max_docs_per_value"] = self._max_docs_per_value
        if self._execution_hint:
            _body["execution_hint"] = (
                self._execution_hint if isinstance(self._execution_hint, str) else self._execution_hint.value
            )
        return _body


class Filter(Aggregation):
    """
    Filter aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-filter-aggregation.html

    A single bucket aggregation that narrows the set of documents to those that match a query.

    :param query: the query (query object or raw query dict) that documents must match to fall into the bucket.
    """
    type: str = "filter"

    def __init__(self, query: Union[_BaseQueries, Dict]):
        super().__init__()
        self._query: Union[_BaseQueries, Dict] = query
        return

    def _build(self) -> Dict:
        """Assemble the ``filter`` aggregation body: a raw dict passes through unchanged."""
        if isinstance(self._query, Dict):
            return self._query
        return {self._query.type: self._query._build()}


class Filters(Aggregation):
    """
    Filters aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-filters-aggregation.html

    A multi-bucket aggregation where each bucket contains the documents that match a query.

    :param filters: either a dict of named queries (bucket name -> query) or a list of anonymous queries.
    :param other_bucket:
        The other_bucket parameter can be set to add a bucket to the response which will contain all documents that
        do not match any of the given filters. The value of this parameter can be as follows:
            - false
                Does not compute the other bucket
            - true
                Returns the other bucket either in a bucket (named _other_ by default) if named filters are being
                used, or as the last bucket if anonymous filters are being used
    :param other_bucket_key:
        The other_bucket_key parameter can be used to set the key for the other bucket to a value other than the
        default _other_. Setting this parameter will implicitly set the other_bucket parameter to true.
    """
    type: str = "filters"

    def __init__(
            self, filters: Union[Dict[str, Union[_BaseQueries, Dict]], List[Union[_BaseQueries, Dict]]],
            other_bucket: bool = None, other_bucket_key: str = None
    ):
        super().__init__()
        self._filters: Union[Dict[str, Union[_BaseQueries, Dict]], List[Union[_BaseQueries, Dict]]] = filters
        self._other_bucket: bool = other_bucket
        self._other_bucket_key: str = other_bucket_key
        return

    def _build(self) -> Dict:
        """Assemble the ``filters`` aggregation body.

        Named filters (a dict) serialize as ``{"filters": {name: query, ...}}``;
        anonymous filters (a list) serialize as ``{"filters": [query, ...]}``.
        """
        if isinstance(self._filters, Dict):
            _filters: Union[Dict, List] = {
                key: value if isinstance(value, Dict) else {value.type: value._build()}
                for key, value in self._filters.items()
            }
        else:
            # BUG FIX: the list branch previously wrapped the list in a second {"filters": ...},
            # producing {"filters": {"filters": [...]}}; anonymous filters must sit directly
            # under the single "filters" key.
            _filters = [
                value if isinstance(value, Dict) else {value.type: value._build()}
                for value in self._filters
            ]
        _body: Dict = {"filters": _filters}
        if self._other_bucket is not None:
            _body["other_bucket"] = self._other_bucket
        if self._other_bucket_key:
            _body["other_bucket_key"] = self._other_bucket_key
        return _body


class GeoDistance(Aggregation):
    """
    Geo-distance aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-geodistance-aggregation.html

    A multi-bucket aggregation that works on geo_point fields and conceptually works very similar to the range
    aggregation. The user can define a point of origin and a set of distance range buckets. The aggregation evaluates
    the distance of each document value from the origin point and determines the buckets it belongs to based on the
    ranges (a document belongs to a bucket if the distance between the document and the origin falls within the
    distance range of the bucket).

    :param field: the geo_point field distances are measured on.
    :param origin:
        Object format: { "lat" : 52.3760, "lon" : 4.894 } - this is the safest format as it is the most explicit
        about the lat & lon values
        String format: "52.3760, 4.894" - where the first number is the lat and the second is the lon
        Array format: [4.894, 52.3760] - which is based on the GeoJSON standard where the first number is the lon
        and the second one is the lat
    :param ranges: distance range definitions, each a plain dict or a ``_Range`` object.
    :param unit:
        By default, the distance unit is m (meters) but it can also accept: mi (miles), in (inches), yd (yards),
        km (kilometers), cm (centimeters), mm (millimeters).
    :param distance_type:
        There are two distance calculation modes: arc (the default), and plane. The arc calculation is the most
        accurate. The plane is the fastest but least accurate. Consider using plane when your search context is
        "narrow", and spans smaller geographical areas (~5km). plane will return higher error margins for searches
        across very large areas (e.g. cross continent search).
    :param keyed:
        Setting the keyed flag to true will associate a unique string key with each bucket and return the ranges as
        a hash rather than an array.
    """
    type: str = "geo_distance"

    def __init__(
            self, field: Union[_BaseGeoField, str], origin: Union[str, Dict[Literal["lat", "lon"], Number], List[Number]],
            ranges: List[Union[Dict, _Range]], unit: str = None, distance_type: str = None, keyed: bool = None
    ):
        super().__init__()
        self._field: Union[_BaseGeoField, str] = field
        self._origin: Union[str, Dict[Literal["lat", "lon"], Number], List[Number]] = origin
        self._ranges: List[Union[Dict, _Range]] = ranges
        self._unit: str = unit
        self._distance_type: str = distance_type
        self._keyed: bool = keyed
        return

    def _build(self) -> Dict:
        """Assemble the ``geo_distance`` aggregation body."""
        _body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
            "origin": self._origin,
            "ranges": [
                _range if isinstance(_range, Dict) else _range._build()
                for _range in self._ranges
            ]
        }
        if self._unit:
            _body["unit"] = self._unit
        if self._distance_type:
            _body["distance_type"] = self._distance_type
        # BUG FIX: was ``if self._keyed:``, which silently dropped an explicit keyed=False;
        # the other range-style aggregations in this module emit keyed whenever it is not None.
        if self._keyed is not None:
            _body["keyed"] = self._keyed
        return _body


class GeohashGrid(Aggregation):
    """
    Geohash grid aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-geohashgrid-aggregation.html

    A multi-bucket aggregation that groups geo_point and geo_shape values into buckets that represent a grid. The
    resulting grid can be sparse and only contains cells that have matching data. Each cell is labeled using a geohash
    which is of user-definable precision.
        - High precision geohashes have a long string length and represent cells that cover only a small area.
        - Low precision geohashes have a short string length and represent cells that each cover a large area.
    Geohashes used in this aggregation can have a choice of precision between 1 and 12.
    You can only use geohash_grid to aggregate an explicitly mapped geo_point or geo_shape field. If the geo_point
    field contains an array, geohash_grid aggregates all the array values.

    :param field:
        Mandatory. The name of the field indexed with GeoPoints.
    :param precision:
        Optional. The string length of the geohashes used to define cells/buckets in the results. Defaults to 5.
        Values outside of [1,12] will be rejected. Alternatively, the precision level can be approximated from a
        distance measure like "1km", "10m". The precision level is calculated such that cells will not exceed the
        specified size (diagonal) of the required precision. When this would lead to precision levels higher than
        the supported 12 levels (e.g. for distances <5.6cm) the value is rejected.
    :param bounds:
        Optional. The bounding box to filter the points in the bucket.
    :param size:
        Optional. The maximum number of geohash buckets to return (defaults to 10,000). When results are trimmed,
        buckets are prioritised based on the volumes of documents they contain.
    :param shard_size:
        Optional. To allow for more accurate counting of the top cells returned in the final result the aggregation
        defaults to returning max(10,(size x number-of-shards)) buckets from each shard. If this heuristic is
        undesirable, the number considered from each shard can be over-ridden using this parameter.
    """
    type: str = "geohash_grid"

    def __init__(
            self, field: Union[_BaseGeoField, str], precision: int = None, bounds: Union[Dict, Bounds] = None,
            size: int = None,
            shard_size: int = None
    ):
        super().__init__()
        self._field: Union[_BaseGeoField, str] = field
        self._precision: int = precision
        self._bounds: Union[Dict, Bounds] = bounds
        self._size: int = size
        self._shard_size: int = shard_size
        return

    def _build(self) -> Dict:
        """Assemble the ``geohash_grid`` aggregation body."""
        _body: Dict = {
            # BUG FIX: field may be a raw string (per the constructor annotation); the previous
            # code unconditionally read ._field_name and raised AttributeError for str fields.
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        if self._precision is not None:
            _body["precision"] = self._precision
        if self._bounds:
            # BUG FIX: the isinstance branch was inverted - it emitted a Bounds object raw and
            # called ._build() on plain dicts. Dicts pass through; Bounds objects are built,
            # matching the dict-vs-object pattern used throughout this module.
            _body["bounds"] = self._bounds if isinstance(self._bounds, Dict) else self._bounds._build()
        if self._size is not None:
            _body["size"] = self._size
        if self._shard_size is not None:
            _body["shard_size"] = self._shard_size
        return _body


class GeotileGrid(Aggregation):
    """
    Geotile grid aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-geotilegrid-aggregation.html

    A multi-bucket aggregation that groups geo_point and geo_shape values into buckets that represent a grid. The
    resulting grid can be sparse and only contains cells that have matching data. Each cell corresponds to a map tile
    as used by many online map sites. Each cell is labeled using a "{zoom}/{x}/{y}" format, where zoom is equal to
    the user-specified precision.
    High precision keys have a larger range for x and y, and represent tiles that cover only a small area.
    Low precision keys have a smaller range for x and y, and represent tiles that each cover a large area.
    See Zoom level documentation on how precision (zoom) correlates to size on the ground. Precision for this
    aggregation can be between 0 and 29, inclusive.

    :param field:
        Mandatory. The name of the field indexed with GeoPoints.
    :param precision:
        Optional. The integer zoom of the key used to define cells/buckets in the results. Defaults to 7. Values
        outside of [0,29] will be rejected.
    :param bounds:
        Optional. The bounding box to filter the points in the bucket.
    :param size:
        Optional. The maximum number of geohash buckets to return (defaults to 10,000). When results are trimmed,
        buckets are prioritised based on the volumes of documents they contain.
    :param shard_size:
        Optional. To allow for more accurate counting of the top cells returned in the final result the aggregation
        defaults to returning max(10,(size x number-of-shards)) buckets from each shard. If this heuristic is
        undesirable, the number considered from each shard can be over-ridden using this parameter.
    """
    type: str = "geotile_grid"

    def __init__(
            self, field: Union[_BaseGeoField, str], precision: int = None, bounds: Union[Dict, Bounds] = None,
            size: int = None,
            shard_size: int = None
    ):
        super().__init__()
        self._field: Union[_BaseGeoField, str] = field
        self._precision: int = precision
        self._bounds: Union[Dict, Bounds] = bounds
        self._size: int = size
        self._shard_size: int = shard_size
        return

    def _build(self) -> Dict:
        """Assemble the ``geotile_grid`` aggregation body."""
        _body: Dict = {
            # BUG FIX: field may be a raw string (per the constructor annotation); the previous
            # code unconditionally read ._field_name and raised AttributeError for str fields.
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        if self._precision is not None:
            _body["precision"] = self._precision
        if self._bounds:
            # BUG FIX: the isinstance branch was inverted - it emitted a Bounds object raw and
            # called ._build() on plain dicts. Dicts pass through; Bounds objects are built,
            # matching the dict-vs-object pattern used throughout this module.
            _body["bounds"] = self._bounds if isinstance(self._bounds, Dict) else self._bounds._build()
        if self._size is not None:
            _body["size"] = self._size
        if self._shard_size is not None:
            _body["shard_size"] = self._shard_size
        return _body


class Global(Aggregation):
    """
    Global aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-global-aggregation.html

    Defines a single bucket of all the documents within the search execution context. This context is defined by the
    indices and the document types you're searching on, but is not influenced by the search query itself.
    """
    type: str = "global"

    def __init__(self):
        super().__init__()
        return

    def _build(self) -> Dict:
        """The global aggregation takes no parameters; its body is always empty."""
        return {}


class Histogram(Aggregation):
    """
    Histogram aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-histogram-aggregation.html

    A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values
    extracted from the documents. It dynamically builds fixed size (a.k.a. interval) buckets over the values.

    :param field: the numeric field to bucket.
    :param interval: the fixed width of each bucket.
    :param min_doc_count:
        By default the response will fill gaps in the histogram with empty buckets. It is possible to change that
        and request buckets with a higher minimum count thanks to the min_doc_count setting.
    :param extended_bounds:
        "Forces" the histogram aggregation to start building buckets on a specific min value and to keep on building
        buckets up to a max value (even if there are no documents anymore). Using extended_bounds only makes sense
        when min_doc_count is 0 (the empty buckets will never be returned if min_doc_count is greater than 0).
        Note that (as the name suggests) extended_bounds is not filtering buckets.
    :param hard_bounds:
        A counterpart of extended_bounds that can limit the range of buckets in the histogram. It is particularly
        useful in the case of open data ranges that can result in a very large number of buckets.
    :param order:
        By default the returned buckets are sorted by their key ascending, though the order behaviour can be
        controlled using the order setting. Supports the same order functionality as the Terms Aggregation.
    :param offset:
        By default the bucket keys start with 0 and then continue in even spaced steps of interval; the bucket
        boundaries can be shifted by using the offset option.
    :param keyed:
        By default, the buckets are returned as an ordered array. It is also possible to request the response as a
        hash instead keyed by the buckets keys.
    :param missing:
        The missing parameter defines how documents that are missing a value should be treated. By default they will
        be ignored but it is also possible to treat them as if they had a value.
    """
    type: str = "histogram"

    def __init__(
            self, field: Union[_BaseField, str], interval: int, min_doc_count: int = None,
            extended_bounds: Dict[Literal["min", "max"], int] = None,
            hard_bounds: Dict[Literal["min", "max"], int] = None, order: Union[Dict, Order] = None, offset: int = None,
            keyed: bool = None, missing: Any = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._interval: int = interval
        self._min_doc_count: int = min_doc_count
        self._extended_bounds: Dict[Literal["min", "max"], int] = extended_bounds
        self._hard_bounds: Dict[Literal["min", "max"], int] = hard_bounds
        self._order: Union[Dict, Order] = order
        self._offset: int = offset
        self._keyed: bool = keyed
        self._missing: Any = missing
        return

    def _build(self) -> Dict:
        """Assemble the ``histogram`` aggregation body; optional settings are added only when provided."""
        _body: Dict = {
            "field": self._field._field_name if not isinstance(self._field, str) else self._field,
            "interval": self._interval,
        }
        if self._min_doc_count is not None:
            _body["min_doc_count"] = self._min_doc_count
        if self._extended_bounds:
            _body["extended_bounds"] = self._extended_bounds
        if self._hard_bounds:
            _body["hard_bounds"] = self._hard_bounds
        if self._order:
            _body["order"] = self._order if not isinstance(self._order, Order) else self._order._build()
        if self._offset is not None:
            _body["offset"] = self._offset
        if self._keyed is not None:
            _body["keyed"] = self._keyed
        if self._missing is not None:
            _body["missing"] = self._missing
        return _body


class IPRange(Aggregation):
    """
    IP range aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-iprange-aggregation.html

    :param field: the ip field to bucket.
    :param ranges: range definitions, each a plain dict or an ``IPRanges`` object.
    :param keyed: when true, buckets are returned as a hash keyed by unique string keys rather than an array.
    """
    type: str = "ip_range"

    def __init__(self, field: Union[_BaseField, str], ranges: List[Union[Dict, IPRanges]], keyed: bool = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._ranges: List[Union[Dict, IPRanges]] = ranges
        self._keyed: bool = keyed
        return

    def _build(self) -> Dict:
        """Assemble the ``ip_range`` aggregation body."""
        built_ranges = []
        for entry in self._ranges:
            built_ranges.append(entry._build() if isinstance(entry, IPRanges) else entry)
        result: Dict = {
            "field": self._field._field_name if not isinstance(self._field, str) else self._field,
            "ranges": built_ranges,
        }
        if self._keyed is not None:
            result["keyed"] = self._keyed
        return result


class Missing(Aggregation):
    """
    Missing aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-missing-aggregation.html

    A field data based single bucket aggregation, that creates a bucket of all documents in the current document set
    context that are missing a field value (effectively, missing a field or having the configured NULL value set).
    This aggregator will often be used in conjunction with other field data bucket aggregators (such as ranges) to
    return information for all the documents that could not be placed in any of the other buckets due to missing
    field data values.

    :param field: the field whose absence defines the bucket.
    """
    type: str = "missing"

    def __init__(self, field: Union[_BaseField, str]):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        return

    def _build(self) -> Dict:
        """Assemble the ``missing`` aggregation body."""
        if isinstance(self._field, str):
            return {"field": self._field}
        return {"field": self._field._field_name}


class MultiTerms(Aggregation):
    """
    Multi terms aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-multi-terms-aggregation.html

    A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values.
    The multi terms aggregation is very similar to the terms aggregation, however in most cases it will be slower
    than the terms aggregation and will consume more memory. Therefore, if the same set of fields is constantly
    used, it would be more efficient to index a combined key for this fields as a separate field and use the terms
    aggregation on this field.
    The multi_term aggregations are the most useful when you need to sort by a number of document or a metric
    aggregation on a composite key and get top N results. If sorting is not required and all values are expected to
    be retrieved using nested terms aggregation or composite aggregations will be a faster and more memory efficient
    solution.

    :param terms: the term sources to combine; at least two are required.
    :param size:
        Optional. Defines how many term buckets should be returned out of the overall terms list. Defaults to 10.
    :param shard_size:
        Optional. The higher the requested size is, the more accurate the results will be, but also, the more
        expensive it will be to compute the final results. The default shard_size is (size * 1.5 + 10).
    :param show_term_doc_count_error:
        Optional. Calculates the doc count error on per term basis. Defaults to false.
    :param order:
        Optional. Specifies the order of the buckets. Defaults to the number of documents per bucket. The bucket
        terms value is used as a tiebreaker for buckets with the same document count.
    :param min_doc_count:
        Optional. The minimal number of documents in a bucket for it to be returned. Defaults to 1.
    :param shard_min_doc_count:
        Optional. The minimal number of documents in a bucket on each shard for it to be returned. Defaults to
        min_doc_count.
    :param collect_mode:
        Optional. Specifies the strategy for data collection. The depth_first or breadth_first modes are supported.
        Defaults to breadth_first.
    """
    type: str = "multi_terms"

    def __init__(
            self, terms: List[Union["Terms", Dict]], size: int = None, shard_size: int = None,
            show_term_doc_count_error: bool = None, order: List[Union[Order, Dict]] = None, min_doc_count: int = None,
            shard_min_doc_count: int = None,
            collect_mode: Union[Literal["breadth_first", "depth_first"], CollectMode] = None
    ):
        super().__init__()
        # Elasticsearch requires at least two term sources for multi_terms.
        if len(terms) < 2:
            raise ValueError("MultiTerms至少需要两个terms")
        self._terms: List[Union["Terms", Dict]] = terms
        self._size: int = size
        self._shard_size: int = shard_size
        self._show_term_doc_count_error: bool = show_term_doc_count_error
        self._order: List[Union[Order, Dict]] = order
        self._min_doc_count: int = min_doc_count
        self._shard_min_doc_count: int = shard_min_doc_count
        self._collect_mode: Union[Literal["breadth_first", "depth_first"], CollectMode, None] = collect_mode
        return

    def _build(self) -> Dict[str, Any]:
        """Assemble the ``multi_terms`` aggregation body."""
        built_terms = [t._build() if not isinstance(t, Dict) else t for t in self._terms]
        result: Dict = {"terms": built_terms}
        if self._size is not None:
            result["size"] = self._size
        if self._shard_size is not None:
            result["shard_size"] = self._shard_size
        if self._show_term_doc_count_error is not None:
            result["show_term_doc_count_error"] = self._show_term_doc_count_error
        if self._order:
            result["order"] = [o._build() if not isinstance(o, Dict) else o for o in self._order]
        if self._min_doc_count is not None:
            result["min_doc_count"] = self._min_doc_count
        if self._shard_min_doc_count is not None:
            result["shard_min_doc_count"] = self._shard_min_doc_count
        if self._collect_mode:
            result["collect_mode"] = (
                self._collect_mode.value if not isinstance(self._collect_mode, str) else self._collect_mode
            )
        return result


class Nested(Aggregation):
    """
    Nested aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-nested-aggregation.html

    A special single bucket aggregation that enables aggregating nested documents.

    :param path: the nested object path (mapping field object or raw path string).
    """
    type: str = "nested"

    def __init__(self, path: Union[_BaseField, str]):
        super().__init__()
        self._path: Union[_BaseField, str] = path
        return

    def _build(self) -> Dict:
        """Assemble the ``nested`` aggregation body."""
        nested_path = self._path
        if not isinstance(nested_path, str):
            nested_path = nested_path._field_name
        return {"path": nested_path}


class Parent(Aggregation):
    """
    Parent aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-parent-aggregation.html

    A special single bucket aggregation that selects parent documents that have
    the specified type, as defined in a join field. This aggregation has a
    single option:
        - type - The child type that should be selected.

    :param type_: the child type that should be selected (trailing underscore
        avoids shadowing the builtin ``type`` and the class attribute).
    """
    type: str = "parent"

    def __init__(self, type_: str):
        super().__init__()
        self._type: str = type_
        return

    def _build(self) -> Dict:
        return {"type": self._type}


class Range(Aggregation):
    """
    Range aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-range-aggregation.html

    A multi-bucket value source based aggregation where the user defines a set
    of ranges, each representing a bucket. During the aggregation process, the
    values extracted from each document are checked against every bucket range.
    Note that each range includes its ``from`` value and excludes its ``to``
    value.

    :param field: the field whose values are bucketed.
    :param ranges: the range definitions, as raw dicts or builder objects.
    :param keyed: when True, associates a unique string key with each bucket
        and returns the ranges as a hash rather than an array.
    """
    type: str = "range"

    def __init__(self, field: Union[_BaseField, str], ranges: List[Union["Range", Dict]], keyed: bool = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._ranges: List[Union["Range", Dict]] = ranges
        self._keyed: bool = keyed
        return

    def _build(self) -> Dict:
        rendered_ranges = [r._build() if not isinstance(r, Dict) else r for r in self._ranges]
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
            "ranges": rendered_ranges,
        }
        if self._keyed is not None:
            body["keyed"] = self._keyed
        return body


class RareTerms(Aggregation):
    """
    Rare terms aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-rare-terms-aggregation.html

    A multi-bucket value source based aggregation which finds "rare" terms -
    terms at the long tail of the distribution that are not frequent.
    Conceptually this resembles a terms aggregation ordered by ascending
    ``_count``, but without the unbounded error that such an ordering carries;
    use rare_terms instead of sorting a terms aggregation by count ascending.

    :param field: the field to find rare terms in.
    :param max_doc_count: the maximum number of documents a term may appear in.
    :param precision: precision of the internal CuckooFilters; smaller values
        give a better approximation at higher memory cost (minimum 0.00001).
    :param include: terms that should be included in the aggregation.
    :param exclude: terms that should be excluded from the aggregation.
    :param missing: the value to use when a document lacks the aggregated field.
    """
    type: str = "rare_terms"

    def __init__(
            self, field: Union[_BaseField, str], max_doc_count: int = None, precision: Number = None,
            include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = None,
            exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = None, missing: Any = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._max_doc_count: int = max_doc_count
        self._precision: Number = precision
        self._include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = include
        self._exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = exclude
        self._missing: Any = missing
        return

    @staticmethod
    def _render_terms(value, passthrough):
        # Leave passthrough types (e.g. regex strings, partition dicts)
        # untouched; resolve field objects, or lists mixing strings and field
        # objects, down to plain field names.
        if isinstance(value, passthrough):
            return value
        if isinstance(value, _BaseField):
            return value._field_name
        return [item if isinstance(item, str) else item._field_name for item in value]

    def _build(self) -> Dict:
        field_name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": field_name}
        if self._max_doc_count is not None:
            body["max_doc_count"] = self._max_doc_count
        if self._precision is not None:
            body["precision"] = self._precision
        if self._include:
            body["include"] = self._render_terms(self._include, (str, Dict))
        if self._exclude:
            body["exclude"] = self._render_terms(self._exclude, str)
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class ReverseNested(Aggregation):
    """
    Reverse nested aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-reverse-nested-aggregation.html

    A special single bucket aggregation that enables aggregating on parent docs
    from nested documents. Effectively this aggregation can break out of the
    nested block structure and link to other nested structures or the root
    document, which allows nesting other aggregations that aren't part of the
    nested object in a nested aggregation.
    The reverse_nested aggregation must be defined inside a nested aggregation.

    :param path: which nested object field should be joined back to. The
        default (None, omitted from the request) joins back to the root / main
        document level. The path cannot contain a reference to a nested object
        field that falls outside the nested aggregation's nested structure the
        reverse_nested is in.
    """
    type: str = "reverse_nested"

    # NOTE: `path` was previously documented in the class docstring but never
    # accepted; it is now an optional, backward-compatible parameter.
    def __init__(self, path: Union[_BaseField, str] = None):
        super().__init__()
        self._path: Union[_BaseField, str, None] = path
        return

    def _build(self) -> Dict:
        body: Dict = {}
        if self._path is not None:
            # A field object carries its resolved name; a string is used as-is.
            body["path"] = self._path if isinstance(self._path, str) else self._path._field_name
        return body


class Sampler(Aggregation):
    """
    Sampler aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-sampler-aggregation.html

    A filtering aggregation used to limit any sub aggregations' processing to a
    sample of the top-scoring documents.

    :param shard_size: limits how many top-scoring documents are collected in
        the sample processed on each shard (Elasticsearch default: 100).
    """
    type: str = "sampler"

    def __init__(self, shard_size: int = None):
        super().__init__()
        self._shard_size: int = shard_size
        return

    def _build(self) -> Dict:
        # Omit shard_size entirely when unset so Elasticsearch applies its default.
        return {} if self._shard_size is None else {"shard_size": self._shard_size}


class SignificantTerms(Aggregation):
    """
    Significant terms aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-significantterms-aggregation.html

    An aggregation that returns interesting or unusual occurrences of terms in
    a set. The significance heuristics (jlh / mutual_information / chi_square /
    gnd / p_value / percentage) are independent top-level options of the
    aggregation body; each is supplied as a dict of that heuristic's settings.

    :param field: the field to find significant terms in.
    :param shard_size: how many candidate terms each shard collects before the
        coordinating node merges them; raising it improves accuracy at the cost
        of memory consumption and network traffic.
    :param jlh: JLH significance score. Multiplies the absolute change in term
        popularity (foregroundPercent - backgroundPercent, favoring common
        terms) by the relative change (foregroundPercent / backgroundPercent,
        favoring rare terms) for a precision/recall sweet spot.
    :param mutual_information: mutual-information heuristic; supports
        ``include_negatives`` (set False to drop terms that are rarer in the
        subset than outside it) and ``background_is_superset`` (set False when
        a custom background filter describes a disjoint document set).
    :param chi_square: chi-square heuristic; behaves like mutual information
        and takes the same ``include_negatives`` / ``background_is_superset``
        options.
    :param gnd: google-normalized-distance heuristic; accepts
        ``background_is_superset``.
    :param p_value: p-value heuristic: the probability of results at least as
        extreme as those observed under the null hypothesis, with foreground
        and background sets treated as independent Bernoulli trials.
    :param background_filter: narrows the scope used for background term
        frequencies (the default background is the entire index).
    :param percentage: number of foreground documents with a term divided by
        the number of background documents with the term (score in (0, 1)).
    :param min_doc_count: minimum merged (global) doc count a term must reach;
        applied only after shard-local statistics are combined.
    :param shard_min_doc_count: shard-local certainty threshold for adding a
        term to the candidate list; defaults to 0 (no effect unless set).
    :param execution_hint: "map" (use field values directly; only worthwhile
        when very few documents match) or "global_ordinals" (default for
        keyword fields; allocates one bucket per global ordinal).
    :param include: terms that should be included in the aggregation.
    :param exclude: terms that should be excluded from the aggregation.
    """
    type: str = "significant_terms"

    def __init__(
            self, field: Union[_BaseField, str], shard_size: int = None, jlh: Dict = None,
            mutual_information: Dict[Literal["include_negatives", "background_is_superset"], bool] = None,
            chi_square: Dict[Literal["include_negatives", "background_is_superset"], bool] = None,
            gnd: Dict[Literal["background_is_superset"], bool] = None,
            p_value: Dict[Literal["normalize_above", "background_is_superset"], Any] = None,
            background_filter: Iterable[_BaseQueries] = None, percentage: Dict = None, min_doc_count: int = None,
            shard_min_doc_count: int = None, execution_hint: Union[Literal["map", "global_ordinals"], ExecutionHint] = None,
            include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = None,
            exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = None,
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._shard_size: int = shard_size
        self._jlh: Dict = jlh
        self._mutual_information: Dict[Literal["include_negatives", "background_is_superset"], bool] = mutual_information
        self._chi_square: Dict[Literal["include_negatives", "background_is_superset"], bool] = chi_square
        self._gnd: Dict[Literal["background_is_superset"], bool] = gnd
        self._p_value: Dict[Literal["normalize_above", "background_is_superset"], Any] = p_value
        self._background_filter: Iterable[_BaseQueries] = background_filter
        self._percentage: Dict = percentage
        self._min_doc_count: int = min_doc_count
        self._shard_min_doc_count: int = shard_min_doc_count
        self._execution_hint: Union[Literal["map", "global_ordinals"], ExecutionHint, None] = execution_hint
        self._include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = include
        self._exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = exclude
        return

    def _build(self) -> Dict:
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
        }
        if self._shard_size is not None:
            body["shard_size"] = self._shard_size
        if self._jlh:
            body["jlh"] = self._jlh
        if self._mutual_information:
            body["mutual_information"] = self._mutual_information
        if self._chi_square:
            body["chi_square"] = self._chi_square
        # BUG FIX: gnd and p_value are independent heuristics and belong at the
        # top level of the body. They were previously written into
        # body["chi_square"][...], which both mis-nested them and raised
        # KeyError whenever chi_square was not supplied.
        if self._gnd:
            body["gnd"] = self._gnd
        if self._p_value:
            body["p_value"] = self._p_value
        if self._background_filter:
            body["background_filter"] = {bf.type: bf._build() for bf in self._background_filter}
        if self._percentage:
            body["percentage"] = self._percentage
        if self._min_doc_count is not None:
            body["min_doc_count"] = self._min_doc_count
        if self._shard_min_doc_count is not None:
            body["shard_min_doc_count"] = self._shard_min_doc_count
        if self._execution_hint:
            # Accept either the plain string or the ExecutionHint enum,
            # consistent with how collect_mode is handled elsewhere in this file.
            body["execution_hint"] = (
                self._execution_hint if isinstance(self._execution_hint, str) else self._execution_hint.value
            )
        if self._include:
            body["include"] = (
                self._include
                if isinstance(self._include, (str, Dict))
                else (
                    self._include._field_name
                    if isinstance(self._include, _BaseField)
                    else [inc if isinstance(inc, str) else inc._field_name for inc in self._include]
                )
            )
        if self._exclude:
            body["exclude"] = (
                self._exclude
                if isinstance(self._exclude, str)
                else (
                    self._exclude._field_name
                    if isinstance(self._exclude, _BaseField)
                    else [exc if isinstance(exc, str) else exc._field_name for exc in self._exclude]
                )
            )
        return body


class SignificantText(Aggregation):
    """
    Significant text aggregation
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-significanttext-aggregation.html

    An aggregation that returns interesting or unusual occurrences of free-text
    terms in a set. It is like the significant terms aggregation but differs in
    that:
        - it is specifically designed for use on ``text`` fields
        - it does not require field data or doc-values
        - it re-analyzes text content on-the-fly, which also lets it filter
          duplicate sections of noisy text that would otherwise skew statistics
    Re-analyzing large result sets requires a lot of time and memory, so it is
    recommended to run significant_text as a child of a sampler or diversified
    sampler aggregation, limiting analysis to a small selection of top-matching
    documents (e.g. 200).

    :param field: the text field to analyze.
    :param filter_duplicate_text: filter near-duplicate text (cut-and-paste
        passages, reply chains, retweets, boilerplate headers/footers, etc.)
        on the fly at query time.
    :param size: how many term buckets to return out of the overall terms
        list; when the number of unique terms exceeds it, counts may be
        slightly off.
    :param min_doc_count: only return terms matching more than this many hits
        (Elasticsearch default: 3); applied after merging shard-local
        statistics, so raising shard_size improves accuracy.
    :param shard_min_doc_count: shard-local certainty threshold for adding a
        term to the candidate list; 0 by default and has no effect unless set.
    :param background_filter: narrows the scope used for background term
        frequencies (the default background is the entire index).
    :param source_fields: the JSON ``_source`` field(s) to analyze text from,
        when the indexed field differs from the source field (e.g. ``copy_to``
        mappings).
    :param include: terms that should be included in the aggregation.
    :param exclude: terms that should be excluded from the aggregation.
    """
    type: str = "significant_text"

    def __init__(
            self, field: Union[_BaseField, str], filter_duplicate_text: bool = None, size: int = None,
            min_doc_count: int = None, shard_min_doc_count: int = None,
            background_filter: Iterable[_BaseQueries] = None, source_fields: Iterable[Union[_BaseField, str]] = None,
            include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = None,
            exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = None,
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._filter_duplicate_text: bool = filter_duplicate_text
        self._size: int = size
        self._min_doc_count: int = min_doc_count
        self._shard_min_doc_count: int = shard_min_doc_count
        self._background_filter: Iterable[_BaseQueries] = background_filter
        self._source_fields: Iterable[Union[_BaseField, str]] = source_fields
        self._include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = include
        self._exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = exclude
        return

    @staticmethod
    def _render_terms(value, passthrough):
        # Leave passthrough types (e.g. regex strings, partition dicts)
        # untouched; resolve field objects, or lists mixing strings and field
        # objects, down to plain field names.
        if isinstance(value, passthrough):
            return value
        if isinstance(value, _BaseField):
            return value._field_name
        return [item if isinstance(item, str) else item._field_name for item in value]

    def _build(self) -> Dict:
        field_name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": field_name}
        # Scalar options: emit only when explicitly provided.
        for key, value in (
                ("filter_duplicate_text", self._filter_duplicate_text),
                ("size", self._size),
                ("min_doc_count", self._min_doc_count),
                ("shard_min_doc_count", self._shard_min_doc_count),
        ):
            if value is not None:
                body[key] = value
        if self._background_filter:
            body["background_filter"] = {query.type: query._build() for query in self._background_filter}
        if self._source_fields:
            body["source_fields"] = [
                source if isinstance(source, str) else source._field_name for source in self._source_fields
            ]
        if self._include:
            body["include"] = self._render_terms(self._include, (str, Dict))
        if self._exclude:
            body["exclude"] = self._render_terms(self._exclude, str)
        return body

    def aggs(self, **aggs: "Aggregation"):
        """
        Intentionally unsupported: the significant_text aggregation does not
        allow child aggregations because supporting them would carry a high
        memory cost, and the very large set of candidate terms is pruned
        heavily before results are returned. As a workaround, clients can take
        the heavily-trimmed result set of a significant_text request and issue
        a follow-up terms aggregation with an ``include`` clause plus child
        aggregations for further analysis of the selected keywords.

        :param aggs: ignored; calling this method always raises.
        :raises NotImplementedError: always.
        """
        raise NotImplementedError("significant_text 聚合有意不支持添加子聚合")


class Terms(Aggregation):
    """
    Terms aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-terms-aggregation.html

    A multi-bucket, value-source based aggregation where buckets are built
    dynamically — one per unique value of ``field``.

    :param field: field object or field name whose unique values define the buckets.
    :param size: maximum number of term buckets to return (ES default: top 10,
        bounded by ``search.max_buckets``).  Larger values cost more memory.
    :param shard_size: number of terms each shard reports to the coordinating
        node (ES default: ``size * 1.5 + 10``).  Raising it improves top-term
        accuracy more cheaply than raising ``size``; it can never be smaller
        than ``size`` (Elasticsearch resets it if it is).
    :param sum_other_doc_count: count of documents that did not make it into
        the returned top terms.
    :param show_term_doc_count_error: when true, each bucket also reports
        ``doc_count_error_upper_bound``, an upper bound on the per-shard
        counting error for that term.
    :param order: bucket sort order (default: descending ``_count``).  May be
        a raw string/dict, an ``OrderType`` member, or an ``Order`` object.
    :param min_doc_count: only return terms that match at least this many hits.
    :param shard_min_doc_count: shard-local lower bound applied before the
        per-shard candidate lists are merged (default 0 — no effect).
    :param include: restrict which buckets are created — a regex string, a
        partition-expression dict, a field, or a list of exact values/fields.
    :param exclude: values to filter out — a regex string, a field, or a list
        of exact values/fields.
    :param collect_mode: sub-aggregation collection strategy, e.g.
        ``breadth_first`` to defer child computation until parent-level
        buckets have been pruned.
    :param execution_hint: execution mechanism — ``map`` (direct field
        values) or ``global_ordinals`` (the keyword-field default).
    :param missing: value substituted for documents that lack the field
        (such documents are ignored by default).
    :param value_type: coerce an unmapped field to an explicit type, working
        around "Failed trying to format bytes…" errors when aggregating over
        indices with inconsistent mappings.
    """
    type: str = "terms"

    def __init__(
            self, field: Union[_BaseField, str], size: int = None, shard_size: int = None,
            sum_other_doc_count: int = None, show_term_doc_count_error: bool = None,
            order: Union[str, OrderType, Order, Dict] = None, min_doc_count: int = None, shard_min_doc_count: int = None,
            include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = None,
            exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = None,
            collect_mode: Union[str, CollectMode] = None, execution_hint: Union[str, ExecutionHint] = None,
            missing: Any = None, value_type: str = None,
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._size: int = size
        self._shard_size: int = shard_size
        self._sum_other_doc_count: int = sum_other_doc_count
        self._show_term_doc_count_error: bool = show_term_doc_count_error
        self._order: Union[str, OrderType, Order, Dict] = order
        self._min_doc_count: int = min_doc_count
        self._shard_min_doc_count: int = shard_min_doc_count
        self._include: Union[str, Dict, _BaseField, List[Union[str, _BaseField]]] = include
        self._exclude: Union[str, _BaseField, List[Union[str, _BaseField]]] = exclude
        self._collect_mode: Union[str, CollectMode] = collect_mode
        self._execution_hint: Union[str, ExecutionHint] = execution_hint
        self._missing: Any = missing
        self._value_type: str = value_type

    def _build(self) -> Dict[str, Any]:
        """Assemble the request body for this terms aggregation."""
        def resolve(ref: Union[_BaseField, str]) -> str:
            # A plain string is already a field name; otherwise unwrap the field object.
            return ref if isinstance(ref, str) else ref._field_name

        body: Dict[str, Any] = {"field": resolve(self._field)}

        # Plain scalar options are copied through whenever explicitly set.
        for key, value in (
                ("size", self._size),
                ("shard_size", self._shard_size),
                ("sum_other_doc_count", self._sum_other_doc_count),
                ("show_term_doc_count_error", self._show_term_doc_count_error),
        ):
            if value is not None:
                body[key] = value

        if self._order:
            if isinstance(self._order, (str, dict)):
                body["order"] = self._order
            elif isinstance(self._order, OrderType):
                body["order"] = self._order.value
            else:
                body["order"] = self._order._build()

        for key, value in (
                ("min_doc_count", self._min_doc_count),
                ("shard_min_doc_count", self._shard_min_doc_count),
        ):
            if value is not None:
                body[key] = value

        # include additionally accepts a partition-expression dict; exclude does not.
        if self._include:
            if isinstance(self._include, (str, dict)):
                body["include"] = self._include
            elif isinstance(self._include, _BaseField):
                body["include"] = self._include._field_name
            else:
                body["include"] = [resolve(item) for item in self._include]

        if self._exclude:
            if isinstance(self._exclude, str):
                body["exclude"] = self._exclude
            elif isinstance(self._exclude, _BaseField):
                body["exclude"] = self._exclude._field_name
            else:
                body["exclude"] = [resolve(item) for item in self._exclude]

        if self._collect_mode:
            mode = self._collect_mode
            body["collect_mode"] = mode if isinstance(mode, str) else mode.value

        if self._execution_hint:
            hint = self._execution_hint
            body["execution_hint"] = hint if isinstance(hint, str) else hint.value

        if self._missing is not None:
            body["missing"] = self._missing
        if self._value_type:
            body["value_type"] = self._value_type
        return body


class VariableWidthHistogram(Aggregation):
    """
    Variable width histogram aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-variablewidthhistogram-aggregation.html

    A multi-bucket aggregation similar to ``histogram``, except that the
    width of each bucket is not specified.  Instead a target number of
    buckets is provided and the intervals are determined dynamically from
    the document distribution, using a simple one-pass clustering algorithm
    that aims for low distances between bucket centroids.  The intervals are
    therefore not necessarily uniform, and the number of buckets returned is
    always less than or equal to the target.

    :param field: field object or field name to bucket on.
    :param buckets: target number of buckets (ES default: 10).
    :param shard_size: number of buckets the coordinating node requests from
        each shard (ES default: ``buckets * 50``).  A higher value makes each
        shard produce smaller buckets, reducing overlap after the reduce
        step, at the cost of larger per-shard priority queues and bigger
        node-to-client transfers.
    :param initial_buffer: number of individual documents buffered in memory
        on each shard before the initial bucketing algorithm runs
        (ES default: ``min(10 * shard_size, 50000)``).  A larger sample uses
        more memory but yields more representative clusters.
    """
    type: str = "variable_width_histogram"

    def __init__(
            self, field: Union[_BaseField, str], buckets: int = None, shard_size: int = None, initial_buffer: int = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._buckets: int = buckets
        self._shard_size: int = shard_size
        self._initial_buffer: int = initial_buffer

    def _build(self) -> Dict[str, Any]:
        # Return annotation widened to Dict[str, Any] for consistency with
        # the sibling aggregations' _build signatures (e.g. Terms._build).
        body: Dict[str, Any] = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        # Optional parameters are attached only when explicitly supplied.
        if self._buckets is not None:
            body["buckets"] = self._buckets
        if self._shard_size is not None:
            body["shard_size"] = self._shard_size
        if self._initial_buffer is not None:
            body["initial_buffer"] = self._initial_buffer
        return body










