"""
@author: 江同学呀
@file: metrics_aggregation.py
@date: 2024/7/25 11:53
@desc:
    指标聚合
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics.html

    The aggregations in this family compute metrics based on values extracted in one way or another from the documents
    that are being aggregated. The values are typically extracted from the fields of the document (using the field data)
    , but can also be generated using scripts.
    Numeric metrics aggregations are a special type of metrics aggregation which output numeric values. Some
    aggregations output a single numeric metric (e.g. avg) and are called single-value numeric metrics aggregation,
    others generate multiple metrics (e.g. stats) and are called multi-value numeric metrics aggregation. The
    distinction between single-value and multi-value numeric metrics aggregations plays a role when these aggregations
    serve as direct sub-aggregations of some bucket aggregations (some bucket aggregations enable you to sort the
    returned buckets based on the numeric metrics in each bucket).

    此系列中的聚合基于以某种方式从正在聚合的文档中提取的值来计算度量。这些值通常从文档的字段中提取（使用字段数据），但也可以使用脚本生成。
    数值指标聚合是一种特殊类型的指标聚合，可输出数值。某些聚合输出单个数值指标（例如 avg），称为单值数值指标聚合，其他聚合生成多个指标（例如统计
    数据），称为多值数值指标聚合。当单值和多值数值指标聚合用作某些存储桶聚合的直接子聚合时，这些聚合将发挥作用（某些存储桶聚合使您能够根据每个存
    储桶中的数值指标对返回的存储桶进行排序）。
"""
from typing import Union, Dict, Any, Literal, List, Optional, Iterable

from espc.common.agg_common import MatrixStatsMode, CalendarInterval
from espc.common.common import Number
from espc.orm.model.aggregation.aggregation import Aggregation
from espc.orm.model.dsl.queries.base_queries import _BaseQueries
from espc.orm.model.dsl.search_fields import Source
from espc.orm.model.dsl.sort_search_results import Sort
from espc.orm.model.mapping.field.base_field.base_field import _BaseField


class Avg(Aggregation):
    """
    Avg aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-avg-aggregation.html

    A single-value metrics aggregation that computes the average of numeric
    values extracted from the aggregated documents.

    :param field: the numeric field (or its name) to average over.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "avg"

    def __init__(self, field: Union[_BaseField, str], missing: Number = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._missing: Number = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class Boxplot(Aggregation):
    """
    Boxplot aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-boxplot-aggregation.html

    Computes a boxplot of numeric values extracted from the aggregated
    documents: minimum, maximum, median, first quartile (25th percentile)
    and third quartile (75th percentile).

    :param field: the numeric or histogram field (or its name).
    :param compression: trades memory for estimation accuracy in the
        underlying approximate algorithm.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "boxplot"

    def __init__(self, field: Union[_BaseField, str], compression: Number = None, missing: Number = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._compression: Number = compression
        self._missing: Number = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        # Optional parameters are emitted only when explicitly set.
        for key, value in (("compression", self._compression), ("missing", self._missing)):
            if value is not None:
                body[key] = value
        return body


class Cardinality(Aggregation):
    """
    Cardinality aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-cardinality-aggregation.html

    A single-value metrics aggregation that calculates an approximate count
    of distinct values.

    :param field: the field (or its name) whose distinct values are counted.
    :param precision_threshold: unique count below which results are expected
        to be close to accurate; trades memory for accuracy. Maximum supported
        value is 40000, default is 3000.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "cardinality"

    def __init__(self, field: Union[_BaseField, str], precision_threshold: Number = None, missing: Union[Number, str] = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._precision_threshold: Number = precision_threshold
        self._missing: Union[Number, str] = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        # Optional parameters are emitted only when explicitly set.
        for key, value in (("precision_threshold", self._precision_threshold), ("missing", self._missing)):
            if value is not None:
                body[key] = value
        return body


class ExtendedStats(Aggregation):
    """
    Extended stats aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-extendedstats-aggregation.html

    A multi-value metrics aggregation that computes stats over numeric values
    extracted from the aggregated documents. Extends the plain stats
    aggregation with sum_of_squares, variance, std_deviation and
    std_deviation_bounds.

    :param field: the numeric field (or its name).
    :param sigma: any non-negative double (non-integer values such as 1.5 are
        allowed) controlling the std_deviation_bounds; 0 is valid but simply
        returns the average for both upper and lower bounds. Bounds are
        population metrics, so they always equal upper_population /
        lower_population respectively.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "extended_stats"

    def __init__(self, field: Union[_BaseField, str], sigma: Number = None, missing: Number = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._sigma: Number = sigma
        self._missing: Number = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        # Optional parameters are emitted only when explicitly set.
        for key, value in (("sigma", self._sigma), ("missing", self._missing)):
            if value is not None:
                body[key] = value
        return body


class GeoBounds(Aggregation):
    """
    Geo-bounds aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-geobounds-aggregation.html

    A metric aggregation that computes the bounding box containing all geo
    values for a field.

    :param field: the geo field (or its name).
    :param wrap_longitude: whether the bounding box may overlap the
        international date line; Elasticsearch defaults to true.
    """
    type: str = "geo_bounds"

    def __init__(self, field: Union[_BaseField, str], wrap_longitude: bool = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._wrap_longitude: bool = wrap_longitude

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        if self._wrap_longitude is not None:
            body["wrap_longitude"] = self._wrap_longitude
        return body


class GeoCentroid(Aggregation):
    """
    Geo-centroid aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-geocentroid-aggregation.html

    A metric aggregation that computes the weighted centroid from all
    coordinate values for geo fields.

    :param field: the geo field (or its name).
    """
    type: str = "geo_centroid"

    def __init__(self, field: Union[_BaseField, str]):
        super().__init__()
        self._field: Union[_BaseField, str] = field

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        return {"field": name}


class GeoLine(Aggregation):
    """
    Geo-line aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-geo-line.html

    Aggregates all geo_point values within a bucket into a LineString ordered
    by the chosen sort field (for example, a date field). The bucket returned
    is a valid GeoJSON Feature representing the line geometry.

    :param point: (required) the geo_point field (or its name).
    :param sort: (required) the numeric field (or its name) used as the sort
        key for ordering the points.
    :param include_sort: (optional, default false) when true, an additional
        array of the sort values is included in the feature properties.
    :param sort_order: (optional, default "ASC") one of "ASC" or "DESC"; the
        line is sorted ascending or descending by the sort key accordingly.
    :param size: (optional, default 10000) maximum length of the line
        represented in the aggregation; valid sizes are 1 to 10000.
    """
    type: str = "geo_line"

    def __init__(
            self, point: Union[_BaseField, str], sort: Union[_BaseField, str], include_sort: bool = None,
            sort_order: str = None, size: int = None
    ):
        super().__init__()
        self._point: Union[_BaseField, str] = point
        self._sort: Union[_BaseField, str] = sort
        self._include_sort: bool = include_sort
        self._sort_order: str = sort_order
        self._size: int = size

    @staticmethod
    def _name_of(field: Union[_BaseField, str]) -> str:
        # Accept either a raw field name or a field object.
        return field if isinstance(field, str) else field._field_name

    def _build(self) -> Dict:
        body: Dict = {
            "point": {"field": self._name_of(self._point)},
            "sort": {"field": self._name_of(self._sort)},
        }
        if self._include_sort is not None:
            body["include_sort"] = self._include_sort
        if self._sort_order:
            body["sort_order"] = self._sort_order
        if self._size is not None:
            body["size"] = self._size
        return body


class MatrixStats(Aggregation):
    """
    Matrix stats aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-matrix-stats-aggregation.html

    A numeric aggregation that computes the following statistics over a set
    of document fields: count, mean, variance, skewness, kurtosis, covariance
    and correlation (the covariance matrix scaled to [-1, 1]).

    :param fields: a single field (or name), or an iterable of fields/names.
        The Elasticsearch API expects ``fields`` to be an array; an iterable
        is serialized as a list of field names, while a single field is
        serialized as its bare name (legacy behavior, kept for backward
        compatibility).
    :param mode: controls which array value is used for multi-valued fields;
        one of "avg" (default), "min", "max", "sum", "median".
    :param missing: per-field default values for documents missing a field,
        given as a ``{field_name: value}`` mapping. By default such documents
        are ignored.
    """
    type: str = "matrix_stats"

    def __init__(
            self, fields: Union[_BaseField, str, Iterable[Union[_BaseField, str]]],
            mode: Union[Literal["avg", "min", "max", "sum", "median"], MatrixStatsMode] = None, missing: Any = None
    ):
        super().__init__()
        self._fields: Union[_BaseField, str, Iterable[Union[_BaseField, str]]] = fields
        self._mode: Union[Literal["avg", "min", "max", "sum", "median"], MatrixStatsMode, None] = mode
        self._missing: Any = missing
        return

    @staticmethod
    def _field_name(field: Union[_BaseField, str]) -> str:
        # Accept either a raw field name or a field object.
        return field if isinstance(field, str) else field._field_name

    def _build(self) -> Dict:
        if isinstance(self._fields, str) or not isinstance(self._fields, Iterable):
            # Single field (string name or field object) — serialized as a
            # bare name, matching the historical behavior of this class.
            fields = self._field_name(self._fields)
        else:
            # Iterable of fields — serialized as a list of names, which is
            # the form the Elasticsearch matrix_stats API documents.
            fields = [self._field_name(f) for f in self._fields]
        body: Dict = {"fields": fields}
        if self._mode:
            # Enum members are unwrapped to their string value.
            body["mode"] = self._mode if isinstance(self._mode, str) else self._mode.value
        if self._missing is not None:
            # `is not None` (not truthiness) so explicit falsy mappings are
            # still sent, consistent with the other aggregation classes.
            body["missing"] = self._missing
        return body


class Max(Aggregation):
    """
    Max aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-max-aggregation.html

    A single-value metrics aggregation that keeps track of and returns the
    maximum value among the numeric values extracted from the aggregated
    documents.

    :param field: the numeric field (or its name).
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "max"

    def __init__(self, field: Union[_BaseField, str], missing: Any = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._missing: Any = missing
        return

    def _build(self) -> Dict:
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        # `is not None` (not truthiness) so `missing=0` is not silently
        # dropped — consistent with Min/Avg and the other aggregations here.
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class MedianAbsoluteDeviation(Aggregation):
    """
    Median absolute deviation aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-median-absolute-deviation-aggregation.html

    A single-value aggregation that approximates the median absolute
    deviation of its search results: for a random variable X, the value
    median(|median(X) - Xi|). It is a robust measure of variability, useful
    for data with outliers or non-normal distributions.

    :param field: the numeric field (or its name).
    :param compression: controls the accuracy/memory trade-off of the
        underlying TDigest approximation; higher values are more accurate at
        the cost of more memory.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "median_absolute_deviation"

    def __init__(self, field: Union[_BaseField, str], compression: Number = None, missing: Number = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._compression: Number = compression
        self._missing: Number = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        # Optional parameters are emitted only when explicitly set.
        for key, value in (("compression", self._compression), ("missing", self._missing)):
            if value is not None:
                body[key] = value
        return body


class Min(Aggregation):
    """
    Min aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-min-aggregation.html

    A single-value metrics aggregation that keeps track of and returns the
    minimum value among the numeric values extracted from the aggregated
    documents.

    :param field: the numeric field (or its name).
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "min"

    def __init__(self, field: Union[_BaseField, str], missing: Number = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._missing: Number = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name}
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class PercentileRanks(Aggregation):
    """
    Percentile ranks aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-percentile-rank-aggregation.html

    A multi-value metrics aggregation that calculates one or more percentile
    ranks over numeric values extracted from the aggregated documents. Values
    can come from specific numeric or histogram fields.

    :param field: the numeric or histogram field (or its name).
    :param values: the values whose percentile ranks should be computed.
    :param hdr: settings for the HDR Histogram implementation, e.g.
        ``{"number_of_significant_value_digits": 3}``. HDR Histogram can be
        faster than the default t-digest at the cost of a larger memory
        footprint; it supports only positive values and errors on negative
        ones, so avoid it when the value range is unknown.
    :param keyed: when true (the Elasticsearch default), a unique string key
        is associated with each bucket and the ranges are returned as a hash
        rather than an array.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "percentile_ranks"

    def __init__(
            self, field: Union[_BaseField, str], values: List[Number],
            hdr: Dict[Literal["number_of_significant_value_digits"], int] = None, keyed: bool = None,
            missing: Number = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._values: List[Number] = values
        self._hdr: Dict[Literal["number_of_significant_value_digits"], int] = hdr
        self._keyed: bool = keyed
        self._missing: Number = missing

    def _build(self) -> Dict:
        # Accept either a raw field name or a field object.
        name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": name, "values": self._values}
        if self._hdr:
            body["hdr"] = self._hdr
        # Optional parameters are emitted only when explicitly set.
        for key, value in (("keyed", self._keyed), ("missing", self._missing)):
            if value is not None:
                body[key] = value
        return body


class Percentiles(Aggregation):
    """
    Percentiles aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-percentile-aggregation.html

    A multi-value metrics aggregation that calculates one or more percentiles
    over numeric values extracted from the aggregated documents. Percentiles
    show the point at which a certain percentage of observed values occur
    (e.g. the 95th percentile is the value greater than 95% of observations)
    and are often used to find outliers or characterize a distribution.

    :param field: the numeric or histogram field (or its name).
    :param percents: the percentiles to compute; Elasticsearch defaults to
        [1, 5, 25, 50, 75, 95, 99] when omitted.
    :param keyed: when true (the Elasticsearch default), a unique string key
        is associated with each bucket and the ranges are returned as a hash
        rather than an array.
    :param tdigest: settings for the default TDigest implementation, e.g.
        ``{"compression": 200}``. The compression parameter limits the
        maximum number of nodes to 20 * compression, trading memory for
        accuracy.
    :param hdr: settings for the HDR Histogram implementation, e.g.
        ``{"number_of_significant_value_digits": 3}``. HDR Histogram can be
        faster than t-digest at the cost of a larger memory footprint; it
        supports only positive values and errors on negative ones, so avoid
        it when the value range is unknown.
    :param missing: value to substitute for documents that lack the field;
        by default such documents are ignored.
    """
    type: str = "percentiles"

    def __init__(
            self, field: Union[_BaseField, str], percents: List[Number] = None, keyed: bool = None,
            tdigest: Dict[Literal["compression"], int] = None,
            hdr: Dict[Literal["number_of_significant_value_digits"], int] = None,
            missing: Union[Number, str] = None
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._percents: List[Number] = percents
        self._keyed: bool = keyed
        self._tdigest: Dict[Literal["compression"], int] = tdigest
        self._hdr: Dict[Literal["number_of_significant_value_digits"], int] = hdr
        # Annotated as Union[Number, str] (was str) to match the missing-value
        # semantics used by the sibling aggregation classes.
        self._missing: Union[Number, str] = missing
        return

    def _build(self) -> Dict:
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name,
        }
        if self._percents:
            body["percents"] = self._percents
        if self._keyed is not None:
            body["keyed"] = self._keyed
        if self._tdigest:
            body["tdigest"] = self._tdigest
        if self._hdr:
            body["hdr"] = self._hdr
        # `is not None` (not truthiness) so falsy substitutes such as 0 are
        # not silently dropped — consistent with the other aggregations here.
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class Rate(Aggregation):
    """
    Rate aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-rate-aggregation.html

    Calculates a rate of documents, or of a field's values, in each bucket. Only
    valid inside a ``date_histogram`` aggregation, or inside a ``composite``
    aggregation that has exactly one ``date_histogram`` source. Field values may
    be extracted from numeric or histogram fields.

    :param field: the numeric or histogram field the rate is computed from.
    :param unit:
        Calendar unit the rate is expressed in. Any value usable as a
        ``calendar_interval`` of a ``date_histogram`` is accepted, but it must be
        convertible from the enclosing histogram's bucket size:

        - ``second`` / ``minute`` / ``hour`` / ``day`` / ``week`` — compatible
          with all intervals;
        - ``month`` / ``quarter`` / ``year`` — compatible only with month,
          quarter and year calendar intervals.

        When the date histogram is not the direct parent of this aggregation,
        both the rate unit and the histogram interval must come from the same
        group: [second, minute, hour, day, week] or [month, quarter, year].
        Defaults to the interval of the ``date_histogram``.
    :param mode:
        ``"sum"`` (the default) rates the sum of the field's values;
        ``"value_count"`` rates the number of values in the field.
    """
    type: str = "rate"

    def __init__(
            self, field: Union[_BaseField, str],
            unit: Union[
                Literal["second", "minute", "hour", "day", "week", "month", "quarter", "year"], CalendarInterval
            ] = None,
            mode: Literal["sum", "value_count"] = None,
    ):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._unit = unit
        self._mode = mode

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``rate`` aggregation body."""
        field_name = self._field if isinstance(self._field, str) else self._field._field_name
        body: Dict = {"field": field_name}
        # Emit the optional parameters only when they were supplied.
        for key, value in (("unit", self._unit), ("mode", self._mode)):
            if value:
                body[key] = value
        return body


class ScriptedMetric(Aggregation):
    """
    Scripted metric aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-scripted-metric-aggregation.html

    A metric aggregation that executes user-provided scripts to produce its
    metric output.

    :param init_script:
        Optional (all other scripts are required by ES). Executed prior to any
        collection of documents; lets the aggregation set up initial state on
        the ``state`` object.
    :param map_script:
        Executed once per collected document. If no ``combine_script`` is
        specified, the resulting state must be stored in the ``state`` object.
    :param combine_script:
        Executed once on each shard after document collection completes;
        consolidates the state returned from that shard.
    :param reduce_script:
        Executed once on the coordinating node after all shards return. It has
        access to the variable ``states`` — the array of each shard's
        ``combine_script`` result — and produces the final aggregation result.
    :param params:
        Optional object whose contents are passed as variables to
        ``init_script``, ``map_script`` and ``combine_script``; useful for
        controlling behavior and sharing state between scripts. ES defaults it
        to ``{}``.
    """
    type: str = "scripted_metric"

    def __init__(
            self, init_script: str = None, map_script: str = None, combine_script: str = None,
            reduce_script: str = None, params: Dict[str, Any] = None
    ):
        super().__init__()
        self._init_script = init_script
        self._map_script = map_script
        self._combine_script = combine_script
        self._reduce_script = reduce_script
        self._params = params

    def _build(self) -> Dict:
        """Serialize the supplied scripts/params into the ES ``scripted_metric`` body."""
        # Emit only the parameters that were actually provided, in DSL order.
        candidates = (
            ("init_script", self._init_script),
            ("map_script", self._map_script),
            ("combine_script", self._combine_script),
            ("reduce_script", self._reduce_script),
            ("params", self._params),
        )
        return {name: value for name, value in candidates if value}


class Stats(Aggregation):
    """
    Stats aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-stats-aggregation.html

    A multi-value metrics aggregation that computes stats over numeric values
    extracted from the aggregated documents. The returned stats consist of:
    min, max, sum, count and avg.

    :param field: the numeric field to compute statistics over.
    :param missing:
        Value to use for documents that are missing the field. By default such
        documents are ignored, but they can be treated as if they had a value.
    """
    type: str = "stats"

    def __init__(self, field: Union[_BaseField, str], missing: Any = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._missing: Any = missing
        return

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``stats`` aggregation body."""
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        # `is not None` rather than truthiness: 0 is a perfectly valid numeric
        # substitute for a missing value and must not be silently dropped.
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class StringStats(Aggregation):
    """
    String stats aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-string-stats-aggregation.html

    A multi-value metrics aggregation that computes statistics over string
    values extracted from the aggregated documents (keyword fields). It returns:

        count      - the number of non-empty fields counted.
        min_length - the length of the shortest term.
        max_length - the length of the longest term.
        avg_length - the average length computed over all terms.
        entropy    - the Shannon entropy computed over all collected terms;
                     quantifies the amount of information in the field and is
                     useful for measuring diversity, similarity, randomness etc.

    :param field: the keyword field to compute statistics over.
    :param show_distribution:
        When true, the per-character probability distribution underlying the
        entropy computation is also returned (ES default: false).
    :param missing:
        Value to use for documents that are missing the field. By default such
        documents are ignored, but they can be treated as if they had a value.
    """
    type: str = "string_stats"

    def __init__(self, field: Union[_BaseField, str], show_distribution: bool = None, missing: Any = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._show_distribution: Optional[bool] = show_distribution
        self._missing: Any = missing
        return

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``string_stats`` body."""
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        # `is not None` rather than truthiness: an explicit False is still an
        # intentional setting, and a falsy `missing` substitute (e.g. "") must
        # not be silently dropped.
        if self._show_distribution is not None:
            body["show_distribution"] = self._show_distribution
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class Sum(Aggregation):
    """
    Sum aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-sum-aggregation.html

    A single-value metrics aggregation that sums up numeric values extracted
    from the aggregated documents. Values can be extracted from numeric or
    histogram fields.

    :param field: the numeric or histogram field to sum over.
    :param missing:
        Value to use for documents that are missing the field. By default such
        documents are ignored, but they can be treated as if they had a value.
    """
    type: str = "sum"

    def __init__(self, field: Union[_BaseField, str], missing: Number = None):
        super().__init__()
        self._field: Union[_BaseField, str] = field
        self._missing: Optional[Number] = missing
        return

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``sum`` aggregation body."""
        body: Dict = {
            "field": self._field if isinstance(self._field, str) else self._field._field_name
        }
        # `is not None` rather than truthiness: missing=0 is a valid numeric
        # substitute and was previously dropped by the falsy check.
        if self._missing is not None:
            body["missing"] = self._missing
        return body


class TTest(Aggregation):
    """
    T-test aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-ttest-aggregation.html

    A ``t_test`` metrics aggregation that performs a statistical hypothesis test
    in which the test statistic follows a Student's t-distribution under the
    null hypothesis, on numeric values extracted from the aggregated documents.
    In practice this tells you whether the difference between two population
    means is statistically significant and did not occur by chance alone.

    :param a:
        First population: a field (name or mapped field object), or a dict with
        ``"field"`` and/or ``"filter"`` keys (``"filter"`` maps to an iterable
        of queries restricting the population).
    :param b: second population; same accepted forms as ``a``.
    :param type_:
        Which two-sample t-test to run:
            ``"paired"``         — paired t-test;
            ``"homoscedastic"``  — two-sample equal-variance test;
            ``"heteroscedastic"``— two-sample unequal-variance test (default).
    """
    type: str = "t_test"

    def __init__(
            self,
            a: Union[
                _BaseField, str, Dict[Literal["field", "filter"], Union[_BaseField, str, Iterable[_BaseQueries]]]
            ],
            b: Union[
                _BaseField, str, Dict[Literal["field", "filter"], Union[_BaseField, str, Iterable[_BaseQueries]]]
            ],
            type_: Literal["paired", "homoscedastic", "heteroscedastic"] = None
    ):
        # Annotation fix: Dict was previously subscripted with a single Union
        # (Dict[Union[...]]), which raises TypeError when the signature is
        # evaluated; the intended key/value form is used instead.
        super().__init__()
        self._a: Union[
            _BaseField, str, Dict[Literal["field", "filter"], Union[_BaseField, str, Iterable[_BaseQueries]]]
        ] = a
        self._b: Union[
            _BaseField, str, Dict[Literal["field", "filter"], Union[_BaseField, str, Iterable[_BaseQueries]]]
        ] = b
        self._type: Optional[Literal["paired", "homoscedastic", "heteroscedastic"]] = type_
        return

    @staticmethod
    def _population(spec) -> Dict:
        """Serialize one population spec (``a`` or ``b``) into its DSL object."""
        if isinstance(spec, str):
            return {"field": spec}
        if isinstance(spec, _BaseField):
            return {"field": spec._field_name}
        # Dict form: "field" maps to a name/field object; "filter" maps to an
        # iterable of queries, each serialized as {query.type: query body}.
        return {
            key: (
                value
                if isinstance(value, str)
                else (
                    value._field_name
                    if isinstance(value, _BaseField)
                    else {query.type: query._build() for query in value}
                )
            )
            for key, value in spec.items()
        }

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``t_test`` body."""
        body: Dict = {
            "a": self._population(self._a),
            "b": self._population(self._b),
        }
        if self._type:
            body["type"] = self._type
        return body


class TopHits(Aggregation):
    """
    Top hits aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-top-hits-aggregation.html

    A ``top_hits`` metric aggregator keeps track of the most relevant documents
    being aggregated. It is intended as a sub-aggregator, so the top matching
    documents can be returned per bucket — effectively grouping result sets by
    the fields of the enclosing bucket aggregator(s).

    :param from_: offset from the first result to fetch.
    :param size:
        Maximum number of top matching hits to return per bucket (ES default: 3).
    :param sort:
        How the top matching hits are sorted. By default hits are sorted by the
        score of the main query.
    :param source: which parts of the document ``_source`` to return.
    """
    type: str = "top_hits"

    def __init__(
            self, from_: int = None, size: int = None,
            sort: Union[List[Union[Dict, Sort]], Dict, Sort] = None,
            source: Union[
                List[Union[str, _BaseField]],
                Source,
                Dict[Literal["fields", "includes", "excludes"], Union[str, _BaseField]]
            ] = None
    ):
        super().__init__()
        self._from: Optional[int] = from_
        self._size: Optional[int] = size
        self._sort: Union[List[Union[Dict, Sort]], Dict, Sort, None] = sort
        self._source: Union[
            List[Union[str, _BaseField]],
            Source,
            Dict[Literal["fields", "includes", "excludes"], Union[str, _BaseField]],
            None
        ] = source
        return

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``top_hits`` body."""
        body: Dict = {}
        # `is not None` so that an explicitly supplied 0 (a valid offset/size)
        # is still emitted instead of being dropped by a falsy check.
        if self._from is not None:
            body["from"] = self._from
        if self._size is not None:
            body["size"] = self._size
        if self._sort:
            # NOTE(review): Sort instances are passed through as-is here while
            # Source below is serialized via ._build(); confirm Sort objects
            # serialize themselves downstream.
            body["sort"] = self._sort
        if self._source:
            if isinstance(self._source, Source):
                body["_source"] = self._source._build()
            elif isinstance(self._source, List):
                body["_source"] = [s if isinstance(s, str) else s._field_name for s in self._source]
            else:
                body["_source"] = {
                    k: (v if isinstance(v, str) else v._field_name) for k, v in self._source.items()
                }
        return body


class TopMetrics(Aggregation):
    """
    Top metrics aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-top-metrics.html

    The ``top_metrics`` aggregation selects metrics from the document with the
    largest or smallest "sort" value.

    :param metrics:
        Selects the fields of the "top" document to return: a single metric
        (``{"field": "m"}``, a field name, or a field object) or a list of
        metrics. ``metrics.field`` supports boolean, ip, keyword and numeric
        fields (plus runtime fields of those types, except keyword); fields
        with array values are not supported and may return inconsistent results.
    :param sort:
        Works like the search request ``sort`` except that it cannot be used on
        binary, flattened, ip, keyword or text fields, and only a single sort
        value is supported (ties are unspecified).
    :param size: return the metrics of the top ``size`` documents.
    """
    type: str = "top_metrics"

    def __init__(
            self, metrics: Union[Dict, List[Union[Dict, str, _BaseField]], str, _BaseField],
            sort: Union[List[Union[Dict, Sort]], Dict, Sort],
            size: int = None
    ):
        super().__init__()
        # Annotation fixed to match the accepted parameter forms: list items
        # may be names or field objects, not only dicts.
        self._metrics: Union[Dict, List[Union[Dict, str, _BaseField]], str, _BaseField] = metrics
        self._sort: Union[List[Union[Dict, Sort]], Dict, Sort] = sort
        self._size: Optional[int] = size
        return

    @staticmethod
    def _field_name_of(field: Union[str, _BaseField]) -> str:
        """Return the raw field name for either a string or a mapped field object."""
        return field if isinstance(field, str) else field._field_name

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``top_metrics`` body."""
        body: Dict = {}
        if self._metrics:
            if isinstance(self._metrics, Dict):
                body["metrics"] = self._metrics
            elif isinstance(self._metrics, (str, _BaseField)):
                # Bug fix: a field object must be serialized via its name (as
                # everywhere else in this module), not embedded verbatim.
                body["metrics"] = {"field": self._field_name_of(self._metrics)}
            else:
                body["metrics"] = [
                    {"field": self._field_name_of(m)} if isinstance(m, (str, _BaseField)) else m
                    for m in self._metrics
                ]
        if self._sort:
            body["sort"] = self._sort
        if self._size is not None:
            body["size"] = self._size
        return body


class ValueCount(Aggregation):
    """
    Value count aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-valuecount-aggregation.html

    A single-value metrics aggregation that counts the number of values
    extracted from the aggregated documents. Typically used alongside other
    single-value aggregations — for example, to know how many values an ``avg``
    was computed over. ``value_count`` does not de-duplicate: a field with
    duplicate values contributes each value individually.

    :param field: the field whose values are counted.
    """
    type: str = "value_count"

    def __init__(self, field: Union[_BaseField, str]):
        super().__init__()
        self._field: Union[_BaseField, str] = field

    def _build(self) -> Dict:
        """Serialize the configured field into the ES ``value_count`` body."""
        if isinstance(self._field, str):
            field_name = self._field
        else:
            field_name = self._field._field_name
        return {"field": field_name}


class WeightedAvg(Aggregation):
    """
    Weighted average aggregation.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/search-aggregations-metrics-weight-avg-aggregation.html

    A single-value metrics aggregation that computes the weighted average of
    numeric values extracted from the aggregated documents:
    ``sum(value * weight) / sum(weight)``. A regular average is the special case
    in which every datapoint has an implicit weight of 1.

    :param value:
        field       the field that values should be extracted from     (required)
        missing     value to use when the field is missing entirely    (optional)
        May be given as a field (name or mapped field object) or as a dict with
        the keys above.
    :param weight:
        field       the field that weights should be extracted from    (required)
        missing     weight to use when the field is missing entirely   (optional)
        Same accepted forms as ``value``.
    :param format_: numeric format applied to the response value.
    """
    type: str = "weighted_avg"

    def __init__(
            self,
            value: Union[_BaseField, str, Dict[Literal["field", "missing"], Union[str, _BaseField, Any]]],
            weight: Union[_BaseField, str, Dict[Literal["field", "missing"], Union[str, _BaseField, Any]]],
            format_: Any = None
    ):
        # Annotation fix: brackets were misplaced so Dict was subscripted with a
        # single parameter (a TypeError when evaluated) and a stray Union member
        # leaked into the outer Union; the intended key/value form is restored.
        super().__init__()
        self._value: Union[_BaseField, str, Dict[Literal["field", "missing"], Union[str, _BaseField, Any]]] = value
        self._weight: Union[_BaseField, str, Dict[Literal["field", "missing"], Union[str, _BaseField, Any]]] = weight
        self._format: Any = format_
        return

    @staticmethod
    def _operand(spec) -> Dict:
        """Serialize a value/weight spec into the ``{"field": ..., "missing": ...}`` DSL object."""
        # Bug fix: ES expects `value`/`weight` to be objects with a "field" key;
        # bare field names were previously emitted for str/_BaseField inputs.
        if isinstance(spec, _BaseField):
            return {"field": spec._field_name}
        if isinstance(spec, str):
            return {"field": spec}
        return {k: v._field_name if isinstance(v, _BaseField) else v for k, v in spec.items()}

    def _build(self) -> Dict:
        """Serialize the configured parameters into the ES ``weighted_avg`` body."""
        body: Dict = {
            "value": self._operand(self._value),
            "weight": self._operand(self._weight),
        }
        if self._format:
            body["format"] = self._format
        return body





