"""KAFKA监控指标"""
import time
from common.package.kafka_db import KafkaConsumer, KafkaAdmin
from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition


class KafkaMetric:
    """Collect Kafka cluster monitoring metrics.

    Gathers per-topic/partition watermark offsets, consumer-group listings,
    committed offsets, and per-group / total consumer lag, and folds them into
    a single indicator dict (see :meth:`metric`).
    """

    # Number of 1-second polls to wait for asynchronous admin results
    # before giving up with a timeout error.
    RetryTimes = 10

    def __init__(self, admin, consumer):
        # admin: confluent_kafka AdminClient-like object.
        # consumer: confluent_kafka Consumer-like object (used for watermarks).
        self.admin = admin
        self.consumer = consumer

    def _group_metric(self):
        """Return the list of valid consumer groups.

        Polls the admin future for up to ``RetryTimes`` seconds using the
        public Future API (``done()``/``result()``) instead of poking the
        private ``_state``/``_result`` attributes.

        Raises:
            Exception: if the broker reported errors or the future timed out.
        """
        future = self.admin.list_consumer_groups()
        for _ in range(self.RetryTimes):
            if future.done():
                result = future.result()
                if result.errors:
                    raise Exception('kafka metric group error')
                return result.valid
            time.sleep(1)
        raise Exception('kafka metric group timeout')

    def _group_offset_metric(self, groups):
        """Return committed-offset results, one per consumer group.

        Each admin call returns a dict of group_id -> Future; waits until
        every future is done (up to ``RetryTimes`` seconds).

        Raises:
            Exception: if any future is still pending after the timeout.
        """
        futures = [self.admin.list_consumer_group_offsets(
                       [ConsumerGroupTopicPartitions(group.group_id)])
                   for group in groups]
        for _ in range(self.RetryTimes):
            if all(f.done() for future_map in futures for f in future_map.values()):
                return [f.result() for future_map in futures for f in future_map.values()]
            time.sleep(1)
        raise Exception('kafka metric offset timeout')

    def _watermark_map(self, meta):
        """Map (topic, partition) -> (low, high) watermark offsets."""
        pairs = [(topic, partition)
                 for topic, topic_meta in meta.topics.items()
                 for partition in topic_meta.partitions]
        return {pair: self.consumer.get_watermark_offsets(TopicPartition(*pair))
                for pair in pairs}

    @staticmethod
    def _topic_metrics(meta, watermarks):
        """Build the per-topic metrics list from cluster metadata and watermarks."""
        topics = []
        for topic, topic_meta in meta.topics.items():
            partitions = []
            message_count = 0
            for partition in topic_meta.partitions:
                # Default to (0, 0) if a watermark lookup failed for this pair.
                low, high = watermarks.get((topic, partition), (0, 0))
                count = high - low
                partitions.append({'partition': partition,
                                   'start_offset': low,
                                   'end_offset': high,
                                   'count': count,
                                   })
                message_count += count
            topics.append({'topic': topic, 'topic_message_count': message_count,
                           'topic_partitions': partitions})
        return topics

    @staticmethod
    def _group_lag_metrics(group_offsets, watermarks):
        """Build per-group lag metrics; returns (groups_offset list, total_lag)."""
        groups_offset = []
        total_lag = 0
        for group in group_offsets:
            group_lag = 0
            partition_offsets = []
            for tp in group.topic_partitions:
                high = watermarks.get((tp.topic, tp.partition), (0, 0))[1]
                # Clamp negative lag to 0 (committed offset may briefly lead
                # the cached high watermark).
                # NOTE(review): tp.offset can be OFFSET_INVALID (-1001) when no
                # offset is committed, which would inflate lag here — confirm.
                lag = max(high - tp.offset, 0)
                partition_offsets.append({
                    'topic': tp.topic,
                    'partition': tp.partition,
                    'offset': tp.offset,
                    'lag': lag
                })
                group_lag += lag
            groups_offset.append({
                'group_id': group.group_id,
                'group_partition_offsets': partition_offsets,
                'group_lag': group_lag
            })
            total_lag += group_lag
        return groups_offset, total_lag

    def metric(self):
        """Return the full cluster indicator dict.

        Keys: cluster_id, controller_id, lag (total consumer lag), brokers,
        topics (per-topic message counts), groups, groups_offset.
        """
        meta = self.admin.list_topics()

        groups = self._group_metric()
        group_offsets = self._group_offset_metric(groups)
        watermarks = self._watermark_map(meta)

        topics = self._topic_metrics(meta, watermarks)
        groups_offset, total_lag = self._group_lag_metrics(group_offsets, watermarks)

        return {
            'cluster_id': meta.cluster_id,
            'controller_id': meta.controller_id,
            'lag': total_lag,
            'brokers': [{'host': f"{broker.host}:{broker.port}", 'id': broker_id}
                        for broker_id, broker in meta.brokers.items()],
            'topics': topics,
            'groups': [{'group_id': g.group_id, 'state': g.state.name} for g in groups],
            'groups_offset': groups_offset
        }


if __name__ == '__main__':
    # Broker address for a local/dev cluster; the consumer additionally needs
    # a dedicated group.id so its own offsets don't pollute real groups.
    base_config = {'bootstrap.servers': '192.168.101.7:9092'}
    consumer_config = dict(base_config, **{'group.id': '__metric_indicator'})

    admin_wrapper = KafkaAdmin()
    admin_wrapper.init(**base_config)

    consumer_wrapper = KafkaConsumer()
    consumer_wrapper.init(**consumer_config)

    collector = KafkaMetric(admin_wrapper.admin, consumer_wrapper.consumer)
    print(collector.metric())
