"""
@author: 江同学呀
@file:  pipeline_aggregation.py
@date: 2025/2/23 16:12
@desc: 
    管道聚合
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline.html

    Pipeline aggregations work on the outputs produced from other aggregations rather than from document sets, adding
    information to the output tree. There are many different types of pipeline aggregation, each computing different
    information from other aggregations, but these types can be broken down into two families:
        Parent
            A family of pipeline aggregations that is provided with the output of its parent aggregation and is able to
            compute new buckets or new aggregations to add to existing buckets.
        Sibling
            Pipeline aggregations that are provided with the output of a sibling aggregation and are able to compute a
            new aggregation which will be at the same level as the sibling aggregation.
    Pipeline aggregations can reference the aggregations they need to perform their computation by using the
    buckets_path parameter to indicate the paths to the required metrics. The syntax for defining these paths can be
    found in the buckets_path Syntax section below.
    Pipeline aggregations cannot have sub-aggregations but depending on the type it can reference another pipeline in
    the buckets_path allowing pipeline aggregations to be chained. For example, you can chain together two derivatives
    to calculate the second derivative (i.e. a derivative of a derivative).

    管道聚合处理从其他聚合而不是文档集生成的输出，从而将信息添加到输出树中。有许多不同类型的管道聚合，每种聚合计算的信息与其他聚合不同，但这些类型
    可以分为两类：
        Parent
            一系列管道聚合，随其父聚合的输出一起提供，并且能够计算新存储桶或新聚合以添加到现有存储桶。
        Sibling
            管道聚合，随同级聚合的输出一起提供，并且能够计算与同级聚合处于同一级别的新聚合。
    管道聚合可以通过使用 buckets_path 参数来指示所需指标的路径，从而引用执行计算所需的聚合。定义这些路径的语法可以在下面的 buckets_path
    Syntax 部分找到。
    管道聚合不能具有子聚合，但根据类型，它可以引用buckets_path中的另一个管道，从而允许链接管道聚合。例如，您可以将两个导数链接在一起以计算第二
    个导数（即导数的导数）。
"""
from typing import Dict, Union, List, Literal, Optional

from espc.common.agg_common import CalendarInterval
from espc.common.common import Number
from espc.orm.model.aggregation.aggregation import Aggregation
from espc.orm.model.dsl.sort_search_results import Sort
from espc.orm.model.scripting.script import Script
from espc.orm.model.struct.functions import BucketCorrelationFunction
from espc.orm.model.struct.model_configuration import ClassificationModelConfiguration, RegressionModelConfiguration


class AverageBucket(Aggregation):
    """
    Avg bucket aggregation (sibling pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-avg-bucket-aggregation.html

    A sibling pipeline aggregation which calculates the mean value of a specified
    metric in a sibling aggregation. The specified metric must be numeric and the
    sibling aggregation must be a multi-bucket aggregation.

    :param buckets_path:
        (Required, string) Path to the buckets to average. For syntax, see
        buckets_path Syntax.
    :param gap_policy:
        (Optional, string) Policy to apply when gaps are found in the data.
        Defaults to skip.
    :param format_:
        (Optional, string) DecimalFormat pattern for the output value. If
        specified, the formatted value is returned in the aggregation's
        value_as_string property.
    """
    type: str = "avg_bucket"

    def __init__(self, buckets_path: str, gap_policy: Optional[str] = None, format_: Optional[str] = None):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._gap_policy: Optional[str] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        body: Dict = {"buckets_path": self._buckets_path}
        # Only emit optional keys that were actually provided (non-empty).
        for key, value in (("gap_policy", self._gap_policy), ("format", self._format)):
            if value:
                body[key] = value
        return body


class BucketScript(Aggregation):
    """
    Bucket script aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-bucket-script-aggregation.html

    A parent pipeline aggregation which executes a script which can perform per
    bucket computations on specified metrics in the parent multi-bucket
    aggregation. The specified metric must be numeric and the script must return
    a numeric value.

    :param buckets_path:
        (Required) A map of script variables and their associated path to the
        buckets we wish to use for the variable (see buckets_path Syntax for
        more details).
    :param script:
        (Required) The script to run for this aggregation. The script can be
        inline, file or indexed (see Scripting for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "bucket_script"

    def __init__(self, buckets_path: Dict[str, str], script: Union[str, Dict, Script], gap_policy: str = None, format_: str = None):
        super().__init__()
        self._buckets_path: Dict[str, str] = buckets_path
        self._script: Union[str, Dict, Script] = script
        self._gap_policy: Optional[str] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path,
            # Plain strings/dicts pass through verbatim; Script objects are
            # serialized via their own builder. Use the builtin `dict` here —
            # typing.Dict is an alias, not meant for isinstance checks.
            "script": self._script if isinstance(self._script, (str, dict)) else self._script._build(),
        }
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        if self._format:
            body["format"] = self._format
        return body


class BucketCountKSTestCorrelation(Aggregation):
    """
    Bucket count K-S test correlation aggregation (sibling pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-count-ks-test-aggregation.html

    A sibling pipeline aggregation which executes a two sample Kolmogorov-Smirnov
    test (a "K-S test") against a provided distribution and the distribution
    implied by the document counts in the configured sibling aggregation. A
    natural use case is a range aggregation nested in a terms aggregation, where
    the overall distribution of a metric is compared to its restriction to each
    term.

    :param buckets_path:
        (Required, string) Path to the buckets that contain one set of values to
        correlate. Must be a _count path. For syntax, see buckets_path Syntax.
    :param alternative:
        (Optional, list) A list of string values indicating which K-S test
        alternative to calculate. The valid values are: "greater", "less",
        "two_sided". Default is all possible alternative hypotheses.
    :param fractions:
        (Optional, list) A list of doubles indicating the distribution of the
        samples with which to compare to the buckets_path results. The default
        is to assume that overall documents are uniformly distributed on these
        buckets.
    :param sampling_method:
        (Optional, string) The sampling methodology when calculating the K-S
        test; determines the CDF points used when comparing the two samples.
        Default is upper_tail. Valid options: upper_tail, uniform, lower_tail.
    """
    type: str = "bucket_count_ks_test"

    def __init__(
            self, buckets_path: str, alternative: List[Literal["greater", "less", "two_sided"]] = None,
            fractions: List[Number] = None, sampling_method: Literal["upper_tail", "uniform", "lower_tail"] = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._alternative: Optional[List[Literal["greater", "less", "two_sided"]]] = alternative
        self._fractions: Optional[List[Number]] = fractions
        self._sampling_method: Optional[Literal["upper_tail", "uniform", "lower_tail"]] = sampling_method
        return

    def _build(self) -> Dict:
        body: Dict = {"buckets_path": self._buckets_path}
        # Optional keys are emitted only when provided (non-empty).
        optional_fields = (
            ("alternative", self._alternative),
            ("fractions", self._fractions),
            ("sampling_method", self._sampling_method),
        )
        for key, value in optional_fields:
            if value:
                body[key] = value
        return body


class BucketCorrelation(Aggregation):
    """
    Bucket correlation aggregation (sibling pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-bucket-correlation-aggregation.html

    A sibling pipeline aggregation which executes a correlation function on the
    configured sibling multi-bucket aggregation.

    :param buckets_path:
        (Required, string) Path to the buckets that contain one set of values to
        correlate. For syntax, see buckets_path Syntax.
    :param function:
        (Required, object) The correlation function to execute.
    """
    type: str = "bucket_correlation"

    def __init__(self, buckets_path: str, function: Union[Dict, BucketCorrelationFunction]):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._function: Union[Dict, BucketCorrelationFunction] = function
        return

    def _build(self) -> Dict:
        # Raw dicts pass through as-is; model objects serialize themselves.
        if isinstance(self._function, dict):
            function_body = self._function
        else:
            function_body = self._function._build()
        return {
            "buckets_path": self._buckets_path,
            "function": function_body,
        }


class BucketSelector(Aggregation):
    """
    Bucket selector aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-bucket-selector-aggregation.html

    A parent pipeline aggregation which executes a script which determines
    whether the current bucket will be retained in the parent multi-bucket
    aggregation. The specified metric must be numeric and the script must return
    a boolean value. If the script language is expression then a numeric return
    value is permitted: 0.0 evaluates to false, all other values to true.

    :param buckets_path:
        (Required) A map of script variables and their associated path to the
        buckets we wish to use for the variable (see buckets_path Syntax for
        more details).
    :param script:
        (Required) The script to run for this aggregation. The script can be
        inline, file or indexed (see Scripting for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    """
    type: str = "bucket_selector"

    def __init__(
            self, buckets_path: Dict[str, str], script: Union[str, Dict, Script],
            gap_policy: Literal["skip", "insert_zeros"] = None
    ):
        super().__init__()
        self._buckets_path: Dict[str, str] = buckets_path
        self._script: Union[str, Dict, Script] = script
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path,
            # isinstance must check the builtin `dict`, not the typing.Dict
            # alias; Script objects serialize via their own builder.
            "script": self._script if isinstance(self._script, (str, dict)) else self._script._build()
        }
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        return body


class BucketSort(Aggregation):
    """
    Bucket sort aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-bucket-sort-aggregation.html

    A parent pipeline aggregation which sorts the buckets of its parent
    multi-bucket aggregation. Zero or more sort fields may be specified together
    with the corresponding sort order. Each bucket may be sorted based on its
    _key, _count or its sub-aggregations. In addition, parameters from and size
    may be set in order to truncate the result buckets.

    :param sort:
        (Optional) The list of fields to sort on. See sort for more details.
    :param from_:
        (Optional) Buckets in positions prior to the set value will be truncated.
    :param size:
        (Optional) The number of buckets to return. Defaults to all buckets of
        the parent aggregation.
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    """
    type: str = "bucket_sort"

    def __init__(
            self, sort: List[Union[Dict, Sort]] = None, from_: int = None, size: int = None,
            gap_policy: Literal["skip", "insert_zeros"] = None
    ):
        super().__init__()
        self._sort: Optional[List[Union[Dict, Sort]]] = sort
        self._from: Optional[int] = from_
        self._size: Optional[int] = size
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        return

    def _build(self) -> Dict:
        body: Dict = {}
        if self._sort:
            body["sort"] = [
                # Check the builtin `dict`, not the typing.Dict alias.
                s if isinstance(s, dict) else s._build() for s in self._sort
            ]
        # `from` and `size` are integers: compare against None so that an
        # explicit 0 is still emitted (a bare truthiness test dropped it).
        if self._from is not None:
            body["from"] = self._from
        if self._size is not None:
            body["size"] = self._size
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        return body


class CumulativeCardinality(Aggregation):
    """
    Cumulative cardinality aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-cumulative-cardinality-aggregation.html

    A parent pipeline aggregation which calculates the Cumulative Cardinality in
    a parent histogram (or date_histogram) aggregation. The specified metric
    must be a cardinality aggregation and the enclosing histogram must have
    min_doc_count set to 0 (default for histogram aggregations).

    :param buckets_path:
        (Required) The path to the cardinality aggregation we wish to find the
        cumulative cardinality for (see buckets_path Syntax for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "cumulative_cardinality"

    def __init__(self, buckets_path: str, format_: Optional[str] = None):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        body: Dict = {"buckets_path": self._buckets_path}
        if self._format:
            body["format"] = self._format
        return body


class CumulativeSum(Aggregation):
    """
    Cumulative sum aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-cumulative-sum-aggregation.html

    A parent pipeline aggregation which calculates the cumulative sum of a
    specified metric in a parent histogram (or date_histogram) aggregation. The
    specified metric must be numeric and the enclosing histogram must have
    min_doc_count set to 0 (default for histogram aggregations).

    :param buckets_path:
        (Required) The path to the buckets we wish to find the cumulative sum
        for (see buckets_path Syntax for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "cumulative_sum"

    def __init__(self, buckets_path: str, format_: str = None):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path
        }
        if self._format:
            body["format"] = self._format
        return body


class Derivative(Aggregation):
    """
    Derivative aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-derivative-aggregation.html

    A parent pipeline aggregation which calculates the derivative of a specified
    metric in a parent histogram (or date_histogram) aggregation. The specified
    metric must be numeric and the enclosing histogram must have min_doc_count
    set to 0 (default for histogram aggregations).

    :param buckets_path:
        (Required) The path to the buckets we wish to find the derivative for
        (see buckets_path Syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    :param unit:
        (Optional) The x-axis unit for the derivative values. When set,
        Elasticsearch reports an extra normalized_value field expressed in the
        requested unit (e.g. per day). Accepts a calendar-interval string or a
        CalendarInterval enum member.
    """
    type: str = "derivative"

    def __init__(
            self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None,
            unit: Union[
                Literal["second", "minute", "hour", "day", "week", "month", "quarter", "year"], CalendarInterval
            ] = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        self._unit: Union[
            Literal["second", "minute", "hour", "day", "week", "month", "quarter", "year"], CalendarInterval, None
        ] = unit
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path
        }
        if self._format:
            body["format"] = self._format
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        if self._unit:
            # Enum members are serialized via their string value.
            body["unit"] = self._unit if isinstance(self._unit, str) else self._unit.value
        return body


class ExtendedStatsBucket(Aggregation):
    """
    Extended stats bucket aggregation (sibling pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-extended-stats-bucket-aggregation.html

    A sibling pipeline aggregation which calculates a variety of stats across
    all buckets of a specified metric in a sibling aggregation. The specified
    metric must be numeric and the sibling aggregation must be a multi-bucket
    aggregation. Provides a few more statistics (sum of squares, standard
    deviation, etc.) compared to the stats_bucket aggregation.

    :param buckets_path:
        (Required) The path to the buckets we wish to calculate stats for (see
        buckets_path Syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    :param sigma:
        (Optional) The number of standard deviations above/below the mean to
        display. Defaults to 2 on the Elasticsearch side.
    """
    type: str = "extended_stats_bucket"

    def __init__(
            self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None,
            sigma: int = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        self._sigma: Optional[int] = sigma
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path
        }
        if self._format:
            body["format"] = self._format
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        # Compare against None: sigma=0 is a legitimate value (bounds collapse
        # to the mean) and was silently dropped by a bare truthiness test.
        if self._sigma is not None:
            body["sigma"] = self._sigma
        return body


class InferenceBucket(Aggregation):
    """
    Inference bucket aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-inference-bucket-aggregation.html

    A parent pipeline aggregation which loads a pre-trained model and performs
    inference on the collated result fields from the parent bucket aggregation.
    To use it, you need the same security privileges that are required for the
    get trained models API.

    :param model_id:
        (Required) The ID or alias for the trained model.
    :param inference_config:
        Contains the inference type and its options. There are two types:
        regression and classification. Elasticsearch treats inference_config as
        optional, but this wrapper requires it positionally; an empty dict is
        omitted from the request body.
    :param buckets_path:
        (Required) Defines the paths to the input aggregations and maps the
        aggregation names to the field names expected by the model. See
        buckets_path Syntax for more details.
    """
    # NOTE(review): the ES 7.17 request key for this aggregation is
    # "inference" — confirm how `type` maps to the request key before relying
    # on "inference_bucket" here.
    type: str = "inference_bucket"

    def __init__(
            self, model_id: str,
            inference_config: Union[
                Dict[
                    Literal["num_top_classes", "num_top_feature_importance_values", "prediction_field_type"],
                    Union[int, str]
                ],
                RegressionModelConfiguration,
                ClassificationModelConfiguration
            ],
            buckets_path: Dict[str, str]
    ):
        super().__init__()
        self._model_id: str = model_id
        self._inference_config: Union[
            Dict[
                Literal["num_top_classes", "num_top_feature_importance_values", "prediction_field_type"],
                Union[int, str]
            ],
            RegressionModelConfiguration,
            ClassificationModelConfiguration
        ] = inference_config
        self._buckets_path: Dict[str, str] = buckets_path
        return

    def _build(self) -> Dict:
        body: Dict = {
            "model_id": self._model_id,
            "buckets_path": self._buckets_path
        }
        if self._inference_config:
            # Check the builtin `dict`, not the typing.Dict alias; model
            # configuration objects serialize via their own builder.
            body["inference_config"] = (
                self._inference_config if isinstance(self._inference_config, dict)
                else self._inference_config._build()
            )
        return body


class MaxBucket(Aggregation):
    """
    Max bucket aggregation (sibling pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-max-bucket-aggregation.html

    A sibling pipeline aggregation which identifies the bucket(s) with the
    maximum value of a specified metric in a sibling aggregation and outputs
    both the value and the key(s) of the bucket(s). The specified metric must be
    numeric and the sibling aggregation must be a multi-bucket aggregation.

    :param buckets_path:
        (Required) The path to the buckets we wish to find the maximum for (see
        buckets_path Syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "max_bucket"

    def __init__(self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path
        }
        if self._format:
            body["format"] = self._format
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        return body


class MinBucket(Aggregation):
    """
    Min bucket aggregation (sibling pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-min-bucket-aggregation.html

    A sibling pipeline aggregation which identifies the bucket(s) with the
    minimum value of a specified metric in a sibling aggregation and outputs
    both the value and the key(s) of the bucket(s). The specified metric must be
    numeric and the sibling aggregation must be a multi-bucket aggregation.

    :param buckets_path:
        (Required) The path to the buckets we wish to find the minimum for (see
        buckets_path Syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data (see
        Dealing with gaps in the data for more details).
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "min_bucket"

    def __init__(self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        body: Dict = {
            "buckets_path": self._buckets_path
        }
        if self._format:
            body["format"] = self._format
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        return body


class MovingAverage(Aggregation):
    """
    Moving average aggregation (parent pipeline).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-movavg-aggregation.html

    Moving averages are a simple method to smooth sequential data, typically
    applied to time-based data such as stock prices or server metrics. The
    smoothing can be used to eliminate high frequency fluctuations or random
    noise, making lower frequency trends (e.g. seasonality) easier to visualize.

    :param buckets_path:
        (Required) Path to the metric of interest (see buckets_path Syntax for
        more details).
    :param model:
        (Optional) The moving average weighting model that we wish to use.
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data. See
        Dealing with gaps in the data.
    :param window:
        (Optional) The size of window to "slide" across the histogram.
    :param minimize:
        (Optional) Whether the model should be algorithmically minimized. See
        Minimization for more details.
    :param settings:
        (Optional) Model-specific settings; contents differ depending on the
        model specified.
    :param predict:
        (Optional) Number of predictions to append to the end of the series.
        All moving average models support a "prediction" mode which attempts to
        extrapolate into the future given the current smoothed moving average;
        predictions are spaced at the same interval as the buckets. Depending on
        the model and parameters these predictions may or may not be accurate.
    """
    type: str = "moving_avg"

    def __init__(
            self, buckets_path: str,
            model: Union[Literal["simple", "linear", "ewma", "holt", "holt_winters"], str] = None,
            gap_policy: Literal["skip", "insert_zeros"] = None, window: int = None, minimize: bool = None,
            settings: Dict = None, predict: int = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._model: Union[Literal["simple", "linear", "ewma", "holt", "holt_winters"], str, None] = model
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._window: Optional[int] = window
        self._minimize: Optional[bool] = minimize
        self._settings: Optional[Dict] = settings
        self._predict: Optional[int] = predict
        return

    def _build(self) -> Dict:
        body: Dict = {"buckets_path": self._buckets_path}
        if self._model:
            body["model"] = self._model
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        # Numeric/boolean fields compare against None so that explicit 0 /
        # False values are still serialized.
        for key, value in (("window", self._window), ("minimize", self._minimize)):
            if value is not None:
                body[key] = value
        if self._settings:
            body["settings"] = self._settings
        if self._predict is not None:
            body["predict"] = self._predict
        return body


class MovingFunction(Aggregation):
    """
    Moving function pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-movfn-aggregation.html

    Given an ordered series of data, the Moving Function aggregation will slide a window across the data and allow the
    user to specify a custom script that is executed on each window of data. For convenience, a number of common
    functions are predefined such as min/max, moving averages, etc.
    This is conceptually very similar to the Moving Average pipeline aggregation, except it provides more functionality.

    :param buckets_path:
        (Required) Path to the metric of interest (see buckets_path syntax for more details).
    :param window:
        (Required) The size of window to "slide" across the histogram.
    :param script:
        (Required) The script that should be executed on each window of data.
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data.
    :param shift:
        (Optional) Shift of window position.
    """
    type: str = "moving_function"

    def __init__(
            self, buckets_path: str, window: int, script: str, gap_policy: Literal["skip", "insert_zeros"] = None,
            shift: int = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._window: int = window
        self._script: str = script
        # Optional settings stay None when unset so _build() can omit them.
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._shift: Optional[int] = shift
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; optional keys are omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
            "window": self._window,
            "script": self._script
        }
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        # Checked against None (not truthiness) so an explicit shift of 0 is kept.
        if self._shift is not None:
            body["shift"] = self._shift
        return body


class MovingPercentiles(Aggregation):
    """
    Moving percentiles pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-moving-percentiles-aggregation.html

    Given an ordered series of percentiles, the Moving Percentile aggregation will slide a window across those
    percentiles and allow the user to compute the cumulative percentile.
    This is conceptually very similar to the Moving Function pipeline aggregation, except it works on the percentiles
    sketches instead of the actual buckets values.

    :param buckets_path:
        (Required) Path to the metric of interest (see buckets_path syntax for more details).
    :param window:
        (Required) The size of window to "slide" across the histogram.
    :param shift:
        (Optional) Shift of window position.
    """
    type: str = "moving_percentiles"

    def __init__(self, buckets_path: str, window: int, shift: int = None):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._window: int = window
        # Optional[int]: left as None when the caller does not shift the window.
        self._shift: Optional[int] = shift
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; "shift" is omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
            "window": self._window,
        }
        # Checked against None (not truthiness) so an explicit shift of 0 is kept.
        if self._shift is not None:
            body["shift"] = self._shift
        return body


class Normalize(Aggregation):
    """
    Normalize pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-normalize-aggregation.html

    A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value.
    Values that cannot be normalized, will be skipped using the skip gap policy.

    :param buckets_path:
        (Required) The path to the buckets we wish to normalize (see buckets_path syntax for more details).
    :param method:
        (Required) The specific method to apply.
        The Normalize Aggregation supports multiple methods to transform the bucket values. Each method definition
        below uses the following original set of bucket values as an example: [5, 5, 10, 50, 10, 20].

            rescale_0_1
                Rescales the data such that the minimum number is zero and the maximum number is 1, with
                the rest normalized linearly in-between.
                x' = (x - min_x) / (max_x - min_x)
                [0, 0, .1111, 1, .1111, .3333]
            rescale_0_100
                Rescales the data such that the minimum number is zero and the maximum number is 100, with
                the rest normalized linearly in-between.
                x' = 100 * (x - min_x) / (max_x - min_x)
                [0, 0, 11.11, 100, 11.11, 33.33]
            percent_of_sum
                Normalizes each value so that it represents a percentage of the total sum it attributes to.
                x' = x / sum_x
                [5%, 5%, 10%, 50%, 10%, 20%]
            mean
                Normalizes each value by how much it differs from the average.
                x' = (x - mean_x) / (max_x - min_x)
                [4.63, 4.63, 9.63, 49.63, 9.63, 19.63]
            zscore
                Normalizes such that each value represents how far it is from the mean relative to the
                standard deviation.
                x' = (x - mean_x) / stdev_x
                [-0.68, -0.68, -0.39, 1.94, -0.39, 0.19]
            softmax
                Normalizes such that each value is exponentiated and relative to the sum of the exponents
                of the original values.
                x' = e^x / sum_e_x
                [2.862E-20, 2.862E-20, 4.248E-18, 0.999, 9.357E-14, 4.248E-18]
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "normalize"

    def __init__(
            self, buckets_path: str,
            method: Literal["rescale_0_1", "rescale_0_100", "percent_of_sum", "mean", "zscore", "softmax"],
            format_: str = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        self._method: Literal["rescale_0_1", "rescale_0_100", "percent_of_sum", "mean", "zscore", "softmax"] = method
        # Optional[str]: left as None when no output format is requested.
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; "format" is omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
            "method": self._method,
        }
        if self._format:
            body["format"] = self._format
        return body


class PercentilesBucket(Aggregation):
    """
    Percentiles bucket pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-percentiles-bucket-aggregation.html

    A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling
    aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation.

    :param buckets_path:
        (Required) The path to the buckets we wish to find the percentiles for (see buckets_path syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data.
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    :param percents:
        (Optional) The list of percentiles to calculate.
    :param keyed:
        (Optional) Flag which returns the range as a hash instead of an array of key-value pairs.
    """
    type: str = "percentiles_bucket"

    def __init__(
            self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None,
            percents: List[Number] = None, keyed: bool = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        # Optional[...] annotations: every parameter below defaults to None,
        # and _build() omits the corresponding key when left unset.
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        self._percents: Optional[List[Number]] = percents
        self._keyed: Optional[bool] = keyed
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; optional keys are omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
        }
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        if self._format:
            body["format"] = self._format
        # Truthiness check: an explicitly empty percents list is also omitted.
        if self._percents:
            body["percents"] = self._percents
        # Checked against None so an explicit keyed=False is still emitted.
        if self._keyed is not None:
            body["keyed"] = self._keyed
        return body


class SerialDifferencing(Aggregation):
    """
    Serial differencing pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-serialdiff-aggregation.html

    Serial differencing is a technique where values in a time series are subtracted from itself at different time lags
    or periods. For example, the datapoint f(x) = f(xt) - f(xt-n), where n is the period being used.
    A period of 1 is equivalent to a derivative with no time normalization: it is simply the change from one point to
    the next. Single periods are useful for removing constant, linear trends.
    Single periods are also useful for transforming data into a stationary series. In this example, the Dow Jones is
    plotted over ~250 days. The raw data is not stationary, which would make it difficult to use with some techniques.
    By calculating the first-difference, we de-trend the data (e.g. remove a constant, linear trend). We can see that
    the data becomes a stationary series (e.g. the first difference is randomly distributed around zero, and doesn't
    seem to exhibit any pattern/behavior). The transformation reveals that the dataset is following a random-walk; the
    value is the previous value +/- a random amount. This insight allows selection of further tools for analysis.

    :param buckets_path:
        (Required) Path to the metric of interest (see buckets_path syntax for more details).
    :param lag:
        (Optional) The historical bucket to subtract from the current value. E.g. a lag of 7 will subtract the current
        value from the value 7 buckets ago. Must be a positive, non-zero integer.
    :param gap_policy:
        (Optional) Determines what should happen when a gap in the data is encountered.
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "serial_diff"

    def __init__(
            self, buckets_path: str, lag: int = None, gap_policy: Literal["skip", "insert_zeros"] = None,
            format_: str = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        # Optional[...] annotations: every parameter below defaults to None,
        # and _build() omits the corresponding key when left unset.
        self._lag: Optional[int] = lag
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; optional keys are omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
        }
        if self._lag is not None:
            body["lag"] = self._lag
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        if self._format:
            body["format"] = self._format
        return body


class StatsBucket(Aggregation):
    """
    Stats bucket pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-stats-bucket-aggregation.html

    A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a
    sibling aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket
    aggregation.

    :param buckets_path:
        (Required) The path to the buckets we wish to calculate stats for (see buckets_path syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data.
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "stats_bucket"

    def __init__(
            self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        # Optional[...] annotations: both parameters below default to None,
        # and _build() omits the corresponding key when left unset.
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; optional keys are omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
        }
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        if self._format:
            body["format"] = self._format
        return body


class SumBucket(Aggregation):
    """
    Sum bucket pipeline aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-pipeline-sum-bucket-aggregation.html

    A sibling pipeline aggregation which calculates the sum across all buckets of a specified metric in a sibling
    aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation.

    :param buckets_path:
        (Required) The path to the buckets we wish to calculate the sum for (see buckets_path syntax for more details).
    :param gap_policy:
        (Optional) The policy to apply when gaps are found in the data.
    :param format_:
        (Optional) Format to apply to the output value of this aggregation.
    """
    type: str = "sum_bucket"

    def __init__(
            self, buckets_path: str, gap_policy: Literal["skip", "insert_zeros"] = None, format_: str = None
    ):
        super().__init__()
        self._buckets_path: str = buckets_path
        # Optional[...] annotations: both parameters below default to None,
        # and _build() omits the corresponding key when left unset.
        self._gap_policy: Optional[Literal["skip", "insert_zeros"]] = gap_policy
        self._format: Optional[str] = format_
        return

    def _build(self) -> Dict:
        """Serialize into the request body dict; optional keys are omitted when unset."""
        body: Dict = {
            "buckets_path": self._buckets_path,
        }
        if self._gap_policy:
            body["gap_policy"] = self._gap_policy
        if self._format:
            body["format"] = self._format
        return body

