import asyncio
from copy import deepcopy
from package.connector.elastic_db import es_db
from functools import reduce
from elasticsearch_dsl import A


class SQLSearch:
    """SQL-backed search implementation.

    NOTE(review): empty placeholder — presumably meant to mirror
    :class:`DISearch` for a SQL backend; confirm intended scope.
    """
    pass


class Elasticsearch:
    """Elasticsearch-backed search implementation.

    NOTE(review): empty placeholder. The name shadows the official
    ``elasticsearch.Elasticsearch`` client class — consider renaming to
    avoid confusing imports.
    """
    pass


class DISearch:
    """Platform search.

    Builds Elasticsearch aggregation bodies from option dicts and flattens
    aggregation responses into tabular rows.
    """

    # Default page size for composite aggregations.
    CompositeSize = 1000

    @classmethod
    def _build_composite_aggs(cls, groups, metrics, size=1000):
        """Build a ``composite`` grouping aggregation.

        :param groups: list of dicts accepted by ``elasticsearch_dsl.A``;
            each must contain ``name_or_agg`` (the agg type) and ``field``.
        :param metrics: list of metric-agg dicts for ``A(**metric)``,
            attached as per-bucket sub-aggregations.
        :param size: composite page size.
        :return: ``{'composite': <agg dict>}`` ready to embed in a body.
        """
        sources = []
        for group in groups:
            agg_type, field = group['name_or_agg'], group['field']
            sources.append({f'{agg_type}_{field}': A(**group)})
        composite_agg = A('composite', sources=sources, size=size)

        for metric in metrics:  # per-bucket metrics
            metric_agg = A(**metric)
            composite_agg.bucket(f'{metric_agg.name}_{metric_agg.field}', metric_agg)
        return {'composite': composite_agg.to_dict()}

    @classmethod
    def _build_multi_terms_aggs(cls, groups, metrics):
        """Build nested (multi-level) ``terms`` grouping aggregations.

        Metrics are attached to the innermost group; the groups are then
        chained so each one nests the next.

        :return: ``{'<name>_<field>': <agg dict>}`` — a plain dict,
            consistent with :meth:`_build_composite_aggs`.
        """
        multi_groups = [A(**group) for group in groups]
        for metric in metrics:
            metric_agg = A(**metric)
            multi_groups[-1].bucket(f'{metric_agg.name}_{metric_agg.field}', metric_agg)

        # ``bucket`` returns the sub-agg it just added, so reduce chains
        # each group under the previous one.
        reduce(lambda outer, inner: outer.bucket(f'{inner.name}_{inner.field}', inner),
               multi_groups)

        root = multi_groups[0]
        # FIX: serialize with .to_dict() so the return type matches the
        # composite branch and build_metric_aggs (previously this leaked a
        # raw A object into the request body).
        return {f'{root.name}_{root.field}': root.to_dict()}

    @classmethod
    def build_group_aggs(cls, groups_option):
        """Build grouping aggregations from a group-options dict.

        :param groups_option: dict with optional ``meta`` (``type``/``size``),
            ``groups`` and ``metrics`` keys.
        :return: aggregation dict; empty dict when no groups are given.
        """
        group_meta = groups_option.get('meta', {'type': 'composite', 'size': cls.CompositeSize})
        groups = groups_option.get('groups', [])
        metrics = groups_option.get('metrics', [])

        if not groups:  # no grouping information
            return {}

        if group_meta.get('type') == 'composite':
            size = group_meta.get('size', cls.CompositeSize)
            return cls._build_composite_aggs(groups, metrics, size=size)
        return cls._build_multi_terms_aggs(groups, metrics)

    @classmethod
    def build_metric_aggs(cls, metric_options):
        """Build top-level metric aggregations.

        :param metric_options: iterable of metric-agg dicts for ``A(**m)``.
        :return: ``{'<name>_<field>': <agg dict>}`` per metric.
        """
        metric_aggs = [A(**metric_option) for metric_option in metric_options]
        return {f'{metric_agg.name}_{metric_agg.field}': metric_agg.to_dict()
                for metric_agg in metric_aggs}

    @classmethod
    def build_aggs(cls, configure):
        """Build the combined grouping + metric aggregation dict."""
        group_options = configure.get('group_options', {})
        group_agg = cls.build_group_aggs(group_options)

        metric_options = configure.get('metric_options', {})
        metric_agg = cls.build_metric_aggs(metric_options)

        return {**group_agg, **metric_agg}

    @classmethod
    def build_query(cls, scope, condition):
        """Build the search target indices and query body.

        TODO(review): ``scope`` is ignored and the index list is hard-coded —
        looks like a placeholder; resolve indices from ``scope`` instead.
        """
        indices = ["dsm-0e7f0cbe1a9611f0881a867292b5caae-000001", "dsm-0d52be081a9611f086f6867292b5caae-000001"]
        return indices, condition

    @classmethod
    def search(cls, search, size=None):
        """Run a synchronous search and return the raw ES response."""
        indices, query = cls.build_query(**search)
        resp = es_db.search(index=indices, body=query, size=size)
        return resp

    @classmethod
    async def async_search(cls, search):
        """Submit an async ES search and yield task status until completion.

        Polls every 5 seconds; yields each in-progress status payload and
        finally the completed task, then stops.
        """
        indices, query = cls.build_query(**search)

        async_task = es_db.async_op('submit', index=indices, body=query, size=1, timeout='3s')
        while True:
            if not async_task.get('is_running'):  # is_running: True = still executing, False = done
                yield async_task
                break

            await asyncio.sleep(5)  # refresh status every 5 seconds

            # (dead pre-loop `async_task_id` assignment removed)
            async_task = es_db.async_op('get', id=async_task['id'])
            yield async_task

    # aggs flattening
    @classmethod
    def flattern_aggs(cls, bucket, row: dict = None, key_as_tuple=False):
        """Flatten an aggregation response into table-like rows.

        Recursively walks ``buckets`` entries and yields one dict per leaf
        bucket. (Name keeps the historical "flattern" spelling for
        backward compatibility.)

        :param bucket: an aggregation (sub-)response dict.
        :param row: accumulated row; copied per branch so siblings don't
            interfere.
        :param key_as_tuple: when a bucket key is a dict (composite aggs),
            store it as a tuple of its values; otherwise the dict key is
            skipped at this level (its fields still merge in during
            recursion via the stats/else branch).
        """
        row = {} if row is None else deepcopy(row)
        is_leaf = False
        for k, v in bucket.items():
            if isinstance(v, dict):
                if 'buckets' in v:
                    for sub_bucket in v['buckets']:
                        key = sub_bucket['key_as_string'] if 'key_as_string' in sub_bucket else sub_bucket['key']
                        if isinstance(key, dict):
                            if key_as_tuple:
                                row.update({k: tuple(key.values())})
                        else:
                            row.update({k: key})
                        # FIX: propagate key_as_tuple into the recursion —
                        # previously nested levels always used the default False.
                        yield from cls.flattern_aggs(sub_bucket, row=row, key_as_tuple=key_as_tuple)
                elif 'value' in v:
                    row[k] = v['value']
                    is_leaf = True
                elif 'values' in v:  # percentiles; ES omits *_as_string here
                    row.update({f'{k}_{p}': pv for p, pv in v['values'].items() if '_as_string' not in p})
                    is_leaf = True
                else:
                    row.update(v)  # stats-style agg: merge its fields directly
        if is_leaf:
            yield row
