import pandas as pd
from setting import setting
from package.connector.sql_db import db
from package.connector.redis_db import rdb
from package.connector.elastic_db import es_db
from package.connector.kafka_db import kafka_consumer
from model.model import ParserGroup
from common.collector_common.parser import GroupParser


class KafkaTask:
    """Consume raw log messages from Kafka, normalize them, and run them
    through the configured parser rule groups."""

    # Kafka topic(s) this task subscribes to.
    Topic = ['todo_parser_queue']

    @classmethod
    def load_rule(cls):
        """Load parser groups and per-rule configuration from the database.

        Returns:
            tuple: ``(groups, rule_config_map)`` where ``groups`` is a list of
            ``{'id': ..., 'rules': [{'id': ..., 'identifiers': ...}, ...]}``
            and ``rule_config_map`` maps each rule id to
            ``{'model_id': ..., 'extract': ..., 'transforms': ...}``.
        """
        with db.SessionLocal() as s:
            items = ParserGroup.get_items(s)
            groups = [
                {
                    'id': group.id,
                    'rules': [{'id': rule.id, 'identifiers': rule.identifiers}
                              for rule in group.rules],
                }
                for group in items
            ]
            rule_config_map = {
                rule.id: {
                    'model_id': rule.model_id,
                    'extract': rule.extract,
                    'transforms': rule.transforms,
                }
                for group in items
                for rule in group.rules
            }
        return groups, rule_config_map

    @classmethod
    def format_messages(cls, messages):
        """Yield messages, guaranteeing each one carries a ``_sourceid``.

        Messages that already have a truthy ``_sourceid`` (collector /
        logstash ingestion) pass through unchanged; messages produced
        directly to Kafka by external senders get a fixed placeholder id.

        BUG FIX: ``cls`` was missing from this ``@classmethod``'s signature,
        so calling it with an argument raised ``TypeError``.
        """
        for message in messages:
            if message.get('_sourceid'):  # collector | received via logstash
                yield message
                continue

            # Sent to Kafka directly by an external producer: stamp a placeholder id.
            yield {**message, '_sourceid': '11111111111111111111111111111111'}

    @classmethod
    def consume(cls):
        """Real-time computation loop: poll Kafka in batches and parse each batch."""
        # BUG FIX: the class attribute is ``Topic`` — ``cls.NormalizedTopic``
        # did not exist and raised AttributeError on startup.
        kafka_consumer.consumer.subscribe(cls.Topic)
        groups, config_map = cls.load_rule()

        while True:
            messages = kafka_consumer.consume(num_messages=5000, timeout=1)
            if not messages:
                continue

            # Project each message onto the two columns the parser needs;
            # missing fields fall back to a 32-char zero id / empty raw text.
            df = pd.DataFrame(
                ({'_sourceid': m.get('_sourceid', '0' * 32), '_raw': m.get('_raw', '')}
                 for m in messages),
                columns=['_sourceid', '_raw'])

            datas = GroupParser.parser(df, groups, config_map)

            # TODO: bulk() write parsed results to the database.


if __name__ == '__main__':
    # Wire up every backing service before starting the consume loop.
    db.init(url=setting.pg_uri, pool_pre_ping=True)
    rdb.init(host=setting.redis_host, password=setting.redis_password)
    es_db.init(hosts=setting.elasticsearch_hosts, http_auth=setting.elasticsearch_auth)

    # Consumer-specific options layered over the shared kafka settings.
    kafka_config = {
        'bootstrap.servers': setting.kafka_servers,
        **setting.kafka_options,
        'group.id': 'collector_consumer_task',
    }
    kafka_consumer.init(**kafka_config)

    KafkaTask.consume()

