""""""
from itertools import groupby


class Statistic:
    """System statistics gathered from Elasticsearch (and the DSM database).

    Wraps an Elasticsearch-compatible client and exposes read-only
    aggregation / cat-API queries over the ``dsm-*`` log indices.
    """

    LogIndex = 'dsm-*'  # log index name pattern (prefix wildcard)

    def __init__(self, client):
        # client: an elasticsearch.Elasticsearch-compatible instance
        self.es_client = client

    def statistic_index(self):
        """Return ILM lifecycle info for every log index.

        Returns:
            list[dict]: one entry per index with keys ``index``,
            ``created_at`` (epoch millis) and ``phase``.
        """
        resp = self.es_client.ilm.explain_lifecycle(self.LogIndex)
        # NOTE(review): 'phase' is absent for indices not managed by ILM
        # ('managed': false) -- assumes every dsm-* index uses ILM; confirm.
        return [{
            'index': index,
            'created_at': item['lifecycle_date_millis'],
            'phase': item['phase'],
        } for index, item in resp['indices'].items()]

    def statistic_source(self):
        """Data-source statistics: doc count and time range per source id
        and per backing index.

        May be slow on large data sets; likely needs optimizing.  TODO
        """
        # Both terms aggregations share the same min/max sub-aggregation
        # on the _time field.
        time_bounds = {'_time_max': {"max": {"field": "_time"}},
                       '_time_min': {"min": {"field": "_time"}}}
        aggregations = {
            '_source_bucket': {
                "terms": {"field": "_sourceid", "size": 10000},
                "aggs": time_bounds
            },
            '_source_index': {
                "terms": {"field": "_index", "size": 10000},
                "aggs": time_bounds
            }
        }
        resp = self.es_client.search(index=self.LogIndex, body={'aggs': aggregations},
                                     size=0, track_total_hits=False)

        def _flatten(buckets, key_name):
            # Turn ES terms buckets into flat result rows.
            return [{
                key_name: bucket['key'],
                'count': bucket['doc_count'],
                '_time_min': bucket['_time_min']['value'],
                '_time_max': bucket['_time_max']['value'],
            } for bucket in buckets]

        return {
            '_sourceid': _flatten(resp['aggregations']['_source_bucket']['buckets'],
                                  '_sourceid'),
            'index': _flatten(resp['aggregations']['_source_index']['buckets'],
                              '_index'),
        }

    def statistic_model(self):
        """Data-model statistics: group indices by alias (index name minus
        the trailing rollover suffix) and total each group's doc count and
        on-disk size (bytes)."""
        resp = self.es_client.cat.indices(index=self.LogIndex, bytes='b', format='json')
        for item in resp:
            # 'dsm-foo-000001' -> alias 'dsm-foo'
            item['alia'] = item['index'].rsplit('-', 1)[0]
        # groupby only groups consecutive runs, so sort by the key first.
        resp.sort(key=lambda x: x['alia'])
        result = []
        for alia, grouped in groupby(resp, key=lambda x: x['alia']):
            indexes = [{
                'index': idx['index'],
                'count': int(idx['docs.count']),
                'size': int(idx['store.size']),
                'health': idx['health'],
            } for idx in grouped]
            result.append({
                'alia': alia,
                'indexes': indexes,
                'count': sum(item['count'] for item in indexes),
                'size': sum(item['size'] for item in indexes),
            })
        return result

    def statistic_disk(self):
        """Disk statistics per data node (bytes).

        Rows without 'disk.avail' (e.g. the unassigned-shards row) are
        skipped.
        """
        resp = self.es_client.cat.allocation(bytes='b', format='json')
        return [{
            'node': item['node'],
            'host': item['host'],
            'used': int(item['disk.used']),
            'avail': int(item['disk.avail']),
            'total': int(item['disk.total'])
        } for item in resp if item.get('disk.avail')]

    def statistic_dsm(self, db):
        """Aggregate per-model and per-source statistics from the DSM DB.

        Args:
            db: object exposing ``execute_sql`` and returning
                SQLAlchemy-style rows (each with a ``._mapping`` view).

        Returns:
            dict with 'source' (device rows as dicts) and 'model'
            (batches rolled up per mod_id).
        """
        batch_rows = db.execute_sql('SELECT idx as _index, mod_id, doc_cnt, rt_min, rt_max, st_sz FROM batch;')
        device_rows = db.execute_sql('SELECT id as _sourceid, doc_cnt, rt_min, rt_max, st_sz FROM device;')

        index_stats = [dict(row._mapping) for row in batch_rows]
        index_stats.sort(key=lambda x: x['mod_id'])  # required by groupby

        # Roll batches up to one record per model.
        model = []
        for model_id, group in groupby(index_stats, key=lambda x: x['mod_id']):
            items = list(group)
            model.append({
                'model_id': model_id,
                'doc_cnt': sum(i['doc_cnt'] for i in items),
                'rt_min': min(i['rt_min'] for i in items),
                'rt_max': max(i['rt_max'] for i in items),
            })
        return {
            'source': [dict(row._mapping) for row in device_rows],
            'model': model
        }


class AlertStatistic:
    """Alert statistics over the last 30 days of alert documents."""

    # Index holding alert documents.
    AlertIndex = 'dsm-7bd9943ac75011ef817bf635d424d390'

    def __init__(self, client):
        # client: an elasticsearch.Elasticsearch-compatible instance
        self.es_client = client

    def statistic(self):
        """Bucket alert counts by level and rule_type, plus a daily trend.

        Returns:
            dict: ``{agg_name: {bucket_key: doc_count}}`` for each of the
            'level', 'rule_type' and 'trend' aggregations.
        """
        query = {'query': {'bool': {'must': [
            {'range': {'_time': {'gte': 'now-30d', 'lt': 'now'}}}
        ]}},
                 'size': 10,
                 'sort': [{'_time': {'order': 'desc'}}]}
        aggregations = {
            'level': {"terms": {"field": "level", "size": 20}},
            'rule_type': {"terms": {"field": "rule_type", "size": 20}},
            'trend': {"date_histogram": {"field": "_time",
                                         "calendar_interval": "day",
                                         "format": "yyyy-MM-dd"}}
        }
        # FIX: 'size' was passed both inside the request body ('size': 10)
        # and as a size=10 kwarg, a duplicate parameter that newer
        # elasticsearch-py clients reject; the body's value alone is enough.
        resp = self.es_client.search(index=self.AlertIndex,
                                     body={**query, 'aggs': aggregations},
                                     track_total_hits=False)

        return {k: {item['key']: item['doc_count'] for item in v['buckets']}
                for k, v in resp['aggregations'].items()}


if __name__ == '__main__':
    from common.package.elastic_db import es_db
    from common.package.db import Postgres

    # NOTE(review): hosts and DB credentials are hard-coded for ad-hoc
    # manual testing; move them to config/environment before shipping.
    es_db.init(hosts='http://192.168.101.79:9200')

    db = Postgres()
    db.init(url='postgresql+psycopg2://dsm:dsm@192.168.101.79:5432/dsm', pool_pre_ping=True)

    def test_search():
        """Manual smoke test of Statistic against the live cluster/DB."""
        statistic = Statistic(es_db.client)
        r1 = statistic.statistic_disk()
        r4 = statistic.statistic_dsm(db)
        # Print the results so the manual run actually shows something
        # (previously they were computed and silently discarded).
        print(r1)
        print(r4)

        # r0 = statistic.statistic_index()
        # r2 = statistic.statistic_source()
        # r3 = statistic.statistic_model()

    def test_alert():
        """Manual smoke test of AlertStatistic against the live cluster."""
        alert_statistic = AlertStatistic(es_db.client)
        r1 = alert_statistic.statistic()
        print(r1)

    test_search()
    # test_alert()
