import time
import json
import pandas as pd
from setting import setting
from package.connector.sql_db import db
from package.connector.redis_db import rdb
from package.connector.elastic_db import es_db
from package.connector.kafka_db import kafka_consumer
from common.cache import readonly_index_caculated_cache, source_intime_cache
from model.model import StatisticDSM
from sqlalchemy.dialects.postgresql import insert
from package.fastapi.job import scheduler_job


class Statistic:
    """Aggregation helpers: realtime per-source counters, batch group-counts,
    and one-shot statistics for readonly (cold) Elasticsearch indexes."""

    @classmethod
    def _index_info(cls):
        """Split dsm-* aliases into readonly index names and an alias -> write-index map."""
        resp = es_db.client.cat.aliases(name='dsm-*', format='json', h='index,alias,is_write_index')
        readonly_indexes = []
        write_index_alias_map = {}
        for entry in resp:
            flag = entry.get('is_write_index')
            if flag == 'true':
                write_index_alias_map[entry['alias']] = entry['index']
            elif flag == 'false':
                readonly_indexes.append(entry['index'])
        return readonly_indexes, write_index_alias_map

    @classmethod
    def _statistic_sourceid_intime_count(cls, df):
        """Record the realtime inflow count of each data source for this batch.

        Each counter lives in the cache for 60s, so a stalled source decays to
        "no data" automatically.
        """
        counts = df['_sourceid'].value_counts().to_dict()
        for source_id, total in counts.items():
            source_intime_cache.set(source_id, total, ex=60)

    @classmethod
    def _statistic_group_count(cls, df):
        """Return count/min/max of `_time` per (_index, _sourceid) pair as a flat frame."""
        grouped = df.groupby(['_index', '_sourceid'])['_time']
        return grouped.agg(['count', 'min', 'max']).reset_index()

    @classmethod
    def _statistic_readonly_index(cls, index):
        """Aggregate a readonly index once: total count, time bounds, and the
        same triple broken down per `_sourceid` (up to 10000 sources)."""
        time_bounds = {'_time_max': {"max": {"field": "_time"}},
                       '_time_min': {"min": {"field": "_time"}}}
        aggregations = {
            '_source_bucket': {
                "terms": {"field": "_sourceid", "size": 10000},
                "aggs": dict(time_bounds)
            },
            'count': {"value_count": {"field": "_uuid"}},
        }
        aggregations.update(time_bounds)
        resp = es_db.client.search(index=index, body={'aggs': aggregations}, size=0, track_total_hits=False)

        stats = resp['aggregations']
        per_source = {}
        for bucket in stats['_source_bucket']['buckets']:
            per_source[bucket['key']] = {'count': bucket['doc_count'],
                                         'firsted_at': bucket['_time_min']['value'],
                                         'lasted_at': bucket['_time_max']['value']}
        return {'id': index,
                'count': stats['count']['value'],
                'firsted_at': stats['_time_min']['value'],
                'lasted_at': stats['_time_max']['value'],
                'source': per_source}

    @classmethod
    def _statistic_data(cls, df):
        """Fold a group-counted frame (see `_statistic_group_count`) into
        per-index records with nested per-source statistics."""
        records = []
        for index_id, index_df in df.groupby('_index'):
            sources = {}
            for source_id, source_df in index_df.groupby('_sourceid'):
                sources[source_id] = {'count': int(source_df['count'].sum()),
                                      'firsted_at': int(source_df['min'].min()),
                                      'lasted_at': int(source_df['max'].max())}
            records.append({'id': index_id,
                            'count': int(index_df['count'].sum()),
                            'firsted_at': int(index_df['min'].min()),
                            'lasted_at': int(index_df['max'].max()),
                            'source': sources})
        return records


class CronTask:
    """Scheduled (cold-path) statistics: aggregate each readonly index once
    and persist the snapshot."""

    @classmethod
    def _record_readonly_index(cls, model_data):
        """Upsert the statistics row for one readonly index.

        Bug fix: the original signature was ``(model_data)`` on a
        ``@classmethod``, so the implicit class argument was bound to
        ``model_data`` and the call site raised TypeError; ``cls`` was missing.

        Uses PostgreSQL ``INSERT ... ON CONFLICT (id) DO UPDATE`` so re-running
        the job for the same index simply overwrites the previous snapshot.
        """
        # NOTE(review): the original also built StatisticDSM(is_writeable=True,
        # **model_data) and discarded it (dead code, removed). If readonly rows
        # are meant to carry an is_writeable flag, it must be added to
        # model_data upstream — confirm with the model owner.
        with db.SessionLocal() as s:
            stmt = insert(StatisticDSM.__table__).values(model_data)
            stmt = stmt.on_conflict_do_update(index_elements=["id"], set_=model_data)
            s.execute(stmt)
            s.commit()

    @classmethod
    def statistic_readonly_index(cls):
        """Compute and persist statistics for every readonly dsm-* index.

        Indexes already processed (flagged in ``readonly_index_caculated_cache``)
        are skipped, so each cold index is aggregated exactly once.
        """
        resp = es_db.client.cat.aliases(name='dsm-*', format='json', h='index,is_write_index')
        readonly_indexes = [item['index'] for item in resp if item.get('is_write_index') == 'false']

        for readonly_index in readonly_indexes:
            if readonly_index_caculated_cache.get(readonly_index):
                continue

            data = Statistic._statistic_readonly_index(readonly_index)
            cls._record_readonly_index(data)
            readonly_index_caculated_cache.set(readonly_index)

    @classmethod
    def cron_index_ilm(cls):
        """Register the readonly-index statistics job to run every 5 minutes."""
        scheduler_job.add_job(job_id='statistic_readonly_index',
                              func=cls.statistic_readonly_index,
                              cron={'interval': 5, 'unit': 'minute'})

class KafkaTask:
    """Hot-path statistics: consume normalized log topics and keep the
    write-index rows in the database up to date."""

    # Topics carrying normalized log records to be counted.
    NormalizedTopic = ['dev_log_udt', 'dev_log_dft']

    @classmethod
    def _merge_data(cls, df, combine):
        """Concatenate the new batch frame onto the accumulated frame.

        Bug fix: ``pd.concat`` takes a *sequence* of frames; the original
        passed ``combine`` as the positional ``axis`` argument, which raises.
        """
        return pd.concat([df, combine])

    @classmethod
    def _batch_update_statistic(cls, datas):
        """Merge batch aggregates into hot-index rows: update existing rows
        in place, insert rows for indexes seen for the first time."""
        with db.SessionLocal() as s:
            for data in datas:
                if orm := StatisticDSM.get_item(s, filter_=[StatisticDSM.id == data['id']], error=False):
                    orm.count = data['count'] + orm.count
                    orm.firsted_at = min(orm.firsted_at, data['firsted_at'])
                    # Bug fix: "last seen" must advance, so take the max; the
                    # original used min() here (the per-source branch below
                    # already used max(), confirming the intent).
                    orm.lasted_at = max(orm.lasted_at, data['lasted_at'])

                    for sid, source in data['source'].items():
                        orm.source[sid] = source if sid not in orm.source else {
                            'count': source['count'] + orm.source[sid]['count'],
                            'firsted_at': min(source['firsted_at'], orm.source[sid]['firsted_at']),
                            'lasted_at': max(source['lasted_at'], orm.source[sid]['lasted_at'])}
                    s.add(orm)
                    continue

                orm = StatisticDSM(**data)
                s.add(orm)
            s.commit()

    @classmethod
    def consume(cls):
        """Consume the normalized topics forever.

        Per batch: record realtime per-source inflow counts, group-count the
        batch, and accumulate it; roughly once a minute flush the accumulated
        aggregates into the database.
        """
        # Only the alias -> write-index mapping is needed on the hot path.
        _, index_mapping = Statistic._index_info()
        kafka_consumer.consumer.subscribe(cls.NormalizedTopic)

        combine = pd.DataFrame([], columns=['_index', '_sourceid', 'min', 'count', 'max'])

        start_ts = int(time.time())
        while True:
            messages = kafka_consumer.consume(num_messages=10000, timeout=30)
            if not messages:
                continue

            # The message key is the alias name (may carry a stray trailing
            # comma — presumably a producer quirk; stripped defensively).
            df = pd.DataFrame(({**json.loads(item.value()), '_index': index_mapping.get(item.key().decode().strip(','))}
                              for item in messages if not item.error()),
                              columns=['_time', '_index', '_sourceid'])

            # Realtime per-source inflow counters (60s TTL).
            Statistic._statistic_sourceid_intime_count(df)

            # Group-count the batch and fold it into the accumulator.
            df = Statistic._statistic_group_count(df)
            combine = cls._merge_data(df, combine)

            end_ts = int(time.time())

            span = end_ts - start_ts
            if span > 60:  # flush roughly once per minute
                if combine.empty:
                    continue

                datas = Statistic._statistic_data(combine)
                cls._batch_update_statistic(datas)
                combine = pd.DataFrame([], columns=['_index', '_sourceid', 'min', 'count', 'max'])
                start_ts = end_ts


if __name__ == '__main__':
    # Wire up every backing store before entering the consumer loop.
    db.init(url=setting.pg_uri, pool_pre_ping=True)
    rdb.init(host=setting.redis_host, password=setting.redis_password)
    es_db.init(hosts=setting.elasticsearch_hosts, http_auth=setting.elasticsearch_auth)

    # Project options may override bootstrap.servers; group.id always wins.
    consumer_conf = {'bootstrap.servers': setting.kafka_servers, **setting.kafka_options}
    consumer_conf['group.id'] = 'collector_consumer_task'
    kafka_consumer.init(**consumer_conf)

    KafkaTask.consume()

