import json
import threading
from typing import Callable, Optional

from kafka import KafkaConsumer, TopicPartition
from kafka.errors import KafkaConfigurationError, KafkaError

from vulcanus.log.log import LOGGER


# Offset behavior notes (kafka-python):
# enable_auto_commit=False: CURRENT-OFFSET does not advance after consuming.
# auto_offset_reset=latest: on startup CURRENT-OFFSET defaults to the latest position,
#   so earlier records are skipped and only new records are consumed (affects the first fetch).
# enable_auto_commit=False + auto_offset_reset=earliest: every restart re-reads all data.
# enable_auto_commit=False + auto_offset_reset=latest: current == end offset on every
#   start, so nothing is ever consumed.
# enable_auto_commit=True + auto_offset_reset=latest: the first start begins at latest
#   (nothing); each later start reads end_offset - current_offset records and advances
#   CURRENT-OFFSET.
# enable_auto_commit=True + auto_offset_reset=earliest: the first start begins at earliest
#   (everything); each later start reads end_offset - current_offset records and advances
#   CURRENT-OFFSET.
# Library defaults: enable_auto_commit=True, auto_offset_reset=latest.


class KafkaCollector(threading.Thread):
    """Background thread that consumes Kafka topics and forwards every record to a callback."""

    def __init__(self, server: str = 'localhost:9092', topics: Optional[list] = None,
                 group: Optional[str] = None, callback: Optional[Callable] = None):
        """
        Kafka consumer thread.

        Args:
            server: kafka bootstrap server address, e.g. "host:9092"
            topics: list of topic names to subscribe to; None/empty subscribes to nothing
            group: kafka consumer group id; None means the consumer joins no group
            callback: function invoked as callback(value, topic) for every record;
                the structure of ``value`` depends on the subscribed topic
        """
        super().__init__()
        self.kafka_config = {
            "bootstrap_servers": server,
            "group_id": group,
            "enable_auto_commit": True,
            "auto_offset_reset": "earliest",
            "value_deserializer": lambda m: json.loads(m.decode('utf-8')),
        }
        self.cb = callback
        try:
            LOGGER.info(f"kafka collector topics:{topics},kafka_config:{self.kafka_config}")
            # topics may be None; unpacking None would raise TypeError, which the
            # KafkaError handler below would not catch — default to an empty tuple.
            self.consumer = KafkaConsumer(*(topics or ()), **self.kafka_config)
        except KafkaError as e:
            # Fall back to an empty iterable so run() terminates cleanly.
            self.consumer = []
            LOGGER.error(f'Connect to kafka {server}:{topics} failed. {e}')

    def run(self):
        """Thread entry point: dispatch each consumed record to the callback."""
        if self.cb is None:
            # Without a callback every record would raise TypeError; bail out early.
            return
        for message in self.consumer:
            self.cb(message.value, message.topic)


class ConsumerForKafka:
    """Pull-style Kafka consumer that can fetch records inside a time window."""

    def __init__(self, server: str = 'localhost:9092', topic: str = None, group_id: str = None, partition: int = 0):
        """
        Kafka consumer.

        Args:
            server: kafka bootstrap server address, e.g. "host:9092"
            topic: topic name to consume
            group_id: kafka consumer group id; None means the consumer joins no group
            partition: default partition index kept for callers; note that
                seek_range_time_data scans every partition of the topic
        """
        self.kafka_config = {
            "bootstrap_servers": server,
            "group_id": group_id,
            "enable_auto_commit": True,
            "auto_offset_reset": "earliest",
            "value_deserializer": lambda m: json.loads(m.decode('utf-8')),
        }
        self.topic = topic
        self.partition = partition

    def set_group_id(self, group_id):
        """Set the group id used by subsequently created consumer clients."""
        self.kafka_config["group_id"] = group_id

    def set_topic(self, topic):
        """Set the topic consumed by subsequent queries."""
        self.topic = topic

    def consumer_client(self):
        """Create a KafkaConsumer from the current config; return None on failure."""
        try:
            return KafkaConsumer(**self.kafka_config)
        except Exception as e:
            # KafkaConfigurationError is an Exception subclass; a single broad
            # handler here keeps client creation best-effort, as callers check None.
            LOGGER.error(f"kafka config error:{e}")
            return None

    def seek_range_time_data(self, start_time, end_time):
        """Fetch kafka records of ``self.topic`` between two timestamps.

        params：
            start_time：window start (same unit as the broker record timestamps
                        and each record's 'Timestamp' value — presumably ms; verify)
            end_time：window end
        return：
            list of record values between start_time and end_time, or [] on error
        """
        consumer = self.consumer_client()
        if not consumer:
            LOGGER.error("get consumer error")
            return []
        res_data = list()
        try:
            # All partition ids of the topic; None when the topic is unknown.
            partition_list = consumer.partitions_for_topic(self.topic)
            if not partition_list:
                LOGGER.error(f"no partitions found for topic {self.topic}")
                return []
            assigned_topics = [TopicPartition(topic=self.topic, partition=index) for index in partition_list]
            consumer.assign(assigned_topics)
            partitions = consumer.assignment()
            partitions_start_timestamp = {partition: start_time for partition in partitions}
            # Earliest offset at/after start_time per partition (None if none exists).
            partitions_map = consumer.offsets_for_times(partitions_start_timestamp)
            end_offsets = consumer.end_offsets(list(partitions))
            LOGGER.info(f"get {start_time} {end_time} start:{partitions_map.items()} end:{end_offsets}")
            for partition, offset_ts in partitions_map.items():
                end_offset = end_offsets.get(partition)
                if end_offset is None or (offset_ts is None and end_offset == 0):
                    LOGGER.info(f"not find event data")
                    continue
                # offsets_for_times yields OffsetAndTimestamp; read .offset explicitly.
                seek_offset = offset_ts.offset if offset_ts is not None else 0
                consumer.seek(partition, seek_offset)
                # NOTE(review): every partition is assigned, so this iteration may also
                # yield records from other partitions — confirm whether per-partition
                # pause/resume is required here.
                for msg in consumer:
                    if msg.value['Timestamp'] > end_time:
                        break
                    res_data.append(msg.value)
                    if msg.offset >= end_offset - 1:
                        break
        finally:
            # Close exactly once, after all partitions are processed. The original
            # closed inside the loop and then kept seeking on a closed consumer,
            # and leaked the consumer when no break was hit.
            consumer.close()
        return res_data
