"""
elasticsearch
分组统计并转换为df
"""
from functools import reduce
from pandas import DataFrame, MultiIndex, notnull
from elasticsearch_dsl import Search, A


class GroupA:
    """Wraps an elasticsearch-dsl bucket (group-by) aggregation.

    :param field: document field to group on
    :param alias: bucket name in the response; defaults to "<agg>_<field>"
    :param kwargs: passed through to ``A`` (must include ``name_or_agg``,
        e.g. ``'terms'``)
    """
    def __init__(self, field, alias=None, **kwargs):
        # Bug fix: callers (and sibling MetricA) pass ``name_or_agg``, not
        # ``agg_type`` -- the old fallback raised KeyError when alias was omitted.
        self.alias = alias or f'{kwargs["name_or_agg"]}_{field}'
        self.group = A(field=field, **kwargs)


class MetricA:
    """Wraps an elasticsearch-dsl metric aggregation plus presentation options.

    :param field: document field to aggregate
    :param alias: metric name in the response; defaults to "<agg>_<field>"
    :param conversion: optional ``{'type': '*' or '/', 'number': n}`` unit
        conversion applied by ``PipeLineSearch.format_metrics``; defaults to
        the previously hard-coded divide-by-3 -- TODO confirm that default
        is intentional and not a debug leftover
    :param round_digits: decimal places kept by ``format_metrics``
    :param kwargs: passed through to ``A`` (must include ``name_or_agg``)
    """
    _DEFAULT_CONVERSION = {'type': '/', 'number': 3}

    def __init__(self, field, alias=None, conversion=None, round_digits=3, **kwargs):
        self.alias = alias or f'{kwargs["name_or_agg"]}_{field}'
        # Copy the default so instances never share one mutable dict.
        self.conversion = conversion or dict(self._DEFAULT_CONVERSION)
        self.round = round_digits
        self.metric = A(field=field, **kwargs)


class PipeLineSearch:
    """Build nested ES bucket/metric aggregations from row/col/metric specs
    and flatten the response into a pandas DataFrame.

    :param rows: list of kwargs dicts for :class:`GroupA` (row groupings)
    :param cols: list of kwargs dicts for :class:`GroupA` (column groupings)
    :param metrics: list of kwargs dicts for :class:`MetricA`
    """
    def __init__(self, rows, cols, metrics):
        self._rows = []
        self._cols = []
        self._groups = []
        self._metrics = []

        self.rows = rows
        self.cols = cols
        self.metrics = metrics

    def build_query(self, s):
        """Attach the nested aggregations to search ``s``.

        Groups are chained row-groups first, then col-groups, each nested
        under the previous one; metrics are attached under the innermost
        group (or at the top level when there are no groups).

        :param s: elasticsearch-dsl ``Search`` (mutated via ``s.aggs``)
        :return: the full query as a dict
        """
        self._rows = [GroupA(**row) for row in self.rows]
        self._cols = [GroupA(**col) for col in self.cols]

        self._groups = [*self._rows, *self._cols]
        self._metrics = [MetricA(**metric) for metric in self.metrics]
        if self._groups:
            # Metric aggs live under the innermost (last) group bucket.
            for metric in self._metrics:
                self._groups[-1].group.bucket(metric.alias, metric.metric)

            # Bug fix: the old ``reduce(lambda g1, g2: g1.group.bucket(...))``
            # fed the *return value* of bucket() (an ``A`` object, which has
            # no ``.group``) back into the lambda, breaking 3+ level
            # groupings. Chain each group under its predecessor explicitly.
            for parent, child in zip(self._groups, self._groups[1:]):
                parent.group.bucket(child.alias, child.group)
            s.aggs.bucket(self._groups[0].alias, self._groups[0].group)
            return s.to_dict()
        # No groupings: metrics become top-level aggregations.
        for metric in self._metrics:
            s.aggs.bucket(metric.alias, metric.metric)
        return s.to_dict()

    def _process_agg(self, bucket, indexes=(), names=()):
        """
        Recursively extract agg values.

        :param bucket: a bucket containing either sub-buckets or a bunch of
            aggregated values
        :param indexes: tuple of bucket keys accumulated on the way down
        :param names: tuple of agg names matching ``indexes``
        :return: yields tuples ``(index_names, index_tuple, row_dict)``
        """
        # For each leaf bucket, yield one row of metric values.
        row = {}
        for k, v in bucket.items():
            if isinstance(v, dict):
                if 'buckets' in v:
                    # Sub-aggregation: recurse into every sub-bucket, using
                    # the (string form of the) key as the index value.
                    for sub_bucket in v['buckets']:
                        if 'key_as_string' in sub_bucket:
                            key = sub_bucket['key_as_string']
                        else:
                            key = sub_bucket['key']
                        for x in self._process_agg(sub_bucket,
                                                   indexes + (key,),
                                                   names + (k,)):
                            yield x
                elif 'value' in v:
                    # Single-valued metric (cardinality, value_count, ...).
                    row[k] = v['value']
                elif 'values' in v:
                    # percentiles (keyed): one column per percent.
                    row.update(v['values'])
                else:
                    # stats-style metrics: flatten all fields into the row.
                    row.update(v)
            else:
                if self._metrics:
                    # Explicit metrics requested: ignore bare scalar fields.
                    continue
                if self._groups and names:
                    # No metrics: fall back to counting docs in the
                    # innermost group's buckets.
                    if names[-1] == self._groups[-1].alias:
                        if k == 'doc_count':
                            row['doc_count'] = v

        if len(row) > 0:
            yield (names, indexes, row)

    def to_df(self, result):
        """Convert a raw ES response dict into a DataFrame.

        Rows are indexed by a MultiIndex of the group keys (or a default
        RangeIndex when there were no groupings).

        :param result: response dict containing an ``aggregations`` key
        :return: pandas ``DataFrame`` (empty when no buckets matched)
        """
        tuples = list(self._process_agg(result['aggregations']))
        if not tuples:
            # Bug fix: an empty result set used to raise IndexError on
            # ``tuples[0][0]``.
            return DataFrame()
        _index_names = list(tuples[0][0])
        _values = []
        _indexes = []
        for _, _index, row in tuples:
            _values.append(row)
            if len(_index) > 0:
                _indexes.append(_index)

        if len(_indexes) >= 1:
            row_index = MultiIndex.from_tuples(_indexes, names=_index_names)
        else:
            row_index = None
        return DataFrame(data=_values, index=row_index)

    def format_metrics(self, df):
        """Apply unit conversion, rounding and NaN-filling to metric columns.

        :param df: DataFrame produced by :meth:`to_df`
        :return: formatted DataFrame (NaN replaced by 0)
        """
        for metric in self._metrics:
            if metric.metric.name == 'percentiles':
                # Percentiles expand into one column per percent and need
                # separate handling (column renames) -- left as-is for now.
                continue
            if metric.alias not in df:
                # Robustness: an empty result produced no such column.
                continue
            if metric.conversion:  # unit conversion
                df[metric.alias] = df[metric.alias] * metric.conversion['number'] \
                    if metric.conversion['type'] == '*' else df[metric.alias] / metric.conversion['number']
            if metric.round:  # decimal places
                df[metric.alias] = round(df[metric.alias], metric.round)
        df = df.where(notnull(df), 0)
        return df

    def execute(self, s):
        """Build the query, run it, and return the formatted DataFrame.

        :param s: elasticsearch-dsl ``Search`` bound to a client
        :return: formatted pandas ``DataFrame``
        """
        self.build_query(s)
        resp = s.execute()
        df = self.to_df(resp.to_dict())
        # Debug prints removed; callers get the DataFrame back directly.
        return self.format_metrics(df)


if __name__ == '__main__':
    from common.package.elastic_db import es_db
    es_db.init(hosts='http://192.168.101.79:9200')

    index = 'dsm-1086afd495bd11efae39622bfe3b1ffc'
    raw = {'query': {'match_all': {}}, 'size': 10}

    def agg_example():
        """Example: group by _host (rows) x _sourceid (cols) with 3 metrics."""
        rows = [{'name_or_agg': 'terms', 'field': '_host', 'alias': 'xx', 'order': {'_key': 'desc'}}]
        cols = [{'name_or_agg': 'terms', 'field': '_sourceid', 'alias': 'yy'}]

        metrics = [
            {'name_or_agg': 'cardinality', 'field': '_host', 'alias': 'zz'},
            {'name_or_agg': 'value_count', 'field': '_host', 'alias': 'zc'},
            {'name_or_agg': 'percentiles', 'field': 'status_code', 'percents': [1, 50, 99], 'keyed': True},
        ]

        pipeline = PipeLineSearch(rows=rows, cols=cols, metrics=metrics)

        # Bug fix: ``index`` was defined but never used -- bind the search to it.
        s = Search(using=es_db.client, index=index)
        # Bug fix: ``Search.from_dict`` is a classmethod returning a NEW
        # Search, so ``s.from_dict(d=raw)`` silently discarded the raw query;
        # ``update_from_dict`` applies it to ``s`` in place.
        s.update_from_dict(raw)
        pipeline.execute(s)


    agg_example()
