"""
使用示例代码：

def generate_string(length):
    result = ""
    for _ in range(length):
        result += "a"
    return result


def producer_test(brokers, topic, message_size, message_count, test_count):
    kafka_manager = KafkaManager(brokers, 'fgc_test')
    content = generate_string(message_size)
    print(f"topic: {topic}")
    print(f"content: {len(content)}")
    for _ in range(0, test_count):
        start_time = time.time_ns()
        for __ in range(0, message_count):
            kafka_manager.send_msg(topic, content)
        end_time = time.time_ns()
        print(f"cost time: {end_time - start_time}")
    kafka_manager.producer.flush()


def receive_callback(topic, key, value, data):
    if data["count"] == 0:
        data['start_time'] = time.time_ns()

    data["count"] = data["count"] + 1
    if data["count"] >= 100000:
        data["count"] = 0
        end_time = time.time_ns()
        print(f"cost time: {end_time - data['start_time']}")


def consumer_test(brokers, group, topic):
    kafka_manager = KafkaManager(brokers, group)
    data = {
        "count": 0,
        "start_time": 0
    }
    kafka_manager.set_receive_msg_callback(receive_callback, data)
    kafka_manager.receive_msg(topic)


if __name__ == '__main__':
    producer_test('192.168.17.23:9092', 'fgc_test', 512, 100000, 10)
    print("======================")
    consumer_test('192.168.17.23:9092', 'fgc_test1', ['fgc_test'])

"""
import multiprocessing

import ujson

import time
from multiprocessing import current_process, Lock
from .common_func.msg_queue_count import MsgQueueCount, KAFKA_COUNT_FILE, timestamp_2_str
from .u_crypto import UCrypto
from .u_exception import exception_capture_decorator, ErrorLogger
from .common_func.print_logger import PrintLogger as Logger
# Guard flag so the missing-dependency warning is emitted at most once.
# NOTE(review): module top-level runs only once per process anyway, so the flag
# is redundant; also, when the import fails, Producer/Consumer stay undefined
# and any later use of UKafka will raise NameError — presumably intentional
# "soft dependency" behavior, confirm with maintainers.
import_error_warned = False
try:
    from confluent_kafka import Producer, Consumer
except ModuleNotFoundError:
    if not import_error_warned:
        Logger.warning("UKafka is not valid, because require module:[confluent-kafka] is not installed. "
                       "If you want use UKafka, please install [confluent-kafka] first")
        import_error_warned = True


# librdkafka configuration keys for SSL/TLS credential locations.
SSL_CA_LOC = "ssl.ca.location"
SSL_CERT_LOC = "ssl.certificate.location"
SSL_KEY_LOC = "ssl.key.location"
# Default process-shared lock guarding writes to the shared kafka count file.
KAFKA_COUNT_FILE_LOCK = Lock()

class _PrintLogger:
    """Minimal fallback logger: every level routes straight to ``print``."""

    debug = info = warning = error = print

class UKafka(object):
    """Thin wrapper around confluent-kafka's Producer/Consumer.

    Clients are created lazily on first send/receive.  Payloads are
    encrypted/decrypted with UCrypto according to ``encrypt_type`` from the
    connection config (empty string means plaintext passthrough).
    """

    def __init__(self, conn_config: dict, logger=None):
        """
        :param conn_config: connection config supporting the keys:
               bootstrap_servers: kafka servers, e.g. 127.0.0.1:9092
               group_id: kafka consumer group id
               client_id: producer client id
               producer_cfg: extra producer settings (librdkafka keys)
               consumer_cfg: extra consumer settings (librdkafka keys)
               encrypt_type: transport encryption mode (supports AES/DES);
                             empty means no encryption
        :param logger: logging tool, expected to be a BaseLogManager object;
               when None the module-level PrintLogger is used as a fallback
        """
        self.bootstrap_servers = conn_config.get("bootstrap_servers")  # kafka servers
        self.group_id = conn_config.get("group_id", "")  # kafka consumer group
        self.client_id = conn_config.get("client_id", "")
        self.receive_msg_callback = None  # callback invoked per message in receive_msg()
        self.receive_msg_callback_data = None  # user data handed to the callback
        self.flush_threshold = 10000  # flush the producer after this many buffered sends
        self.produce_msg_count = 0  # messages produced since the last flush
        self.producer = None  # created lazily on first send
        self.consumer = None  # created lazily on first get_msg
        self.debug_mode = False  # when True, consumed raw messages are logged
        self.encrypt_type = conn_config.get("encrypt_type", "")

        # FIX: fall back to the imported PrintLogger when no logger is given.
        # The original assigned `logger` directly, so the debug calls below
        # raised AttributeError whenever the default (None) was used.
        self.logger = logger if logger is not None else Logger

        # FIX: copy the nested config dicts so the keys injected below do not
        # mutate the caller's conn_config object.
        self.producer_cfg = dict(conn_config.get("producer_cfg", dict()))
        self.producer_cfg['bootstrap.servers'] = self.bootstrap_servers
        self.producer_cfg['client.id'] = self.client_id
        self.producer_cfg['on_delivery'] = self._delivery_report

        self.consumer_cfg = dict(conn_config.get("consumer_cfg", dict()))
        self.consumer_cfg['bootstrap.servers'] = self.bootstrap_servers
        self.consumer_cfg['group.id'] = self.group_id

        self.logger.debug(f"config info: {conn_config}")
        self.logger.debug(f"convert info: producer cfg: {self.producer_cfg}\nconsumer cfg: {self.consumer_cfg}")
        if logger is not None:
            ErrorLogger.add_logger(multiprocessing.current_process().name, logger)

    def reg_logger(self, logger):
        """
        Register a new logger for the current process and this instance.

        FIX: a None logger is now ignored instead of being assigned to
        self.logger, which would break every subsequent logging call.
        """
        if logger is not None:
            ErrorLogger.add_logger(multiprocessing.current_process().name, logger)
            self.logger = logger

    def _delivery_report(self, err, msg):
        # on_delivery callback wired into the producer config in __init__.
        if err:
            self.logger.error('Message delivery failed: {}'.format(err))
        else:
            self.logger.debug(f'Message key: {msg.key()}, partition: [{msg.partition()}] delivered to {msg.topic()}')
            self.logger.debug(f"Message content: {msg.value()}")

    def set_producer_cfg(self, cfg: dict):
        """
        Replace the producer config with a new one.
        :param cfg: producer config dict
        """
        self.producer_cfg = cfg

    def set_consumer_cfg(self, cfg: dict):
        """
        Replace the consumer config with a new one.
        :param cfg: consumer config dict
        """
        self.consumer_cfg = cfg

    def update_producer_cfg(self, cfg: dict):
        """
        Merge *cfg* into the producer config; existing keys are overwritten.
        :param cfg: producer config dict
        """
        self.producer_cfg.update(cfg)

    def update_consumer_cfg(self, cfg: dict):
        """
        Merge *cfg* into the consumer config; existing keys are overwritten.
        :param cfg: consumer config dict
        """
        self.consumer_cfg.update(cfg)

    def enable_debug_mode(self):
        """Enable debug mode: raw messages are logged when consumed."""
        self.debug_mode = True

    def disable_debug_mode(self):
        """Disable debug mode."""
        self.debug_mode = False

    def send_msg(self, topic: str, msg: str):
        """
        Send one message; the producer is created on first use and reused.
        :param topic: kafka topic
        :param msg: message content
        :return: True on success, False on failure
        """
        encrypted_msg = self._encrypt_msg(msg)
        return self._send_encrypted_msg(topic, "", encrypted_msg)

    def _encrypt_msg(self, msg: str):
        # base64-armored ciphertext; UCrypto presumably passes through when
        # encrypt_mode is "" — TODO confirm against UCrypto.encrypt.
        return UCrypto.encrypt(msg, encrypt_mode=self.encrypt_type.upper(), cipher_encoding="base64")

    def _decrypt_msg(self, msg):
        # Best effort: when decryption fails (e.g. plaintext message), the raw
        # payload bytes are returned instead of raising.
        try:
            return UCrypto.decrypt(msg.value(), encrypt_mode=self.encrypt_type.upper(), cipher_encoding="base64")
        except Exception:
            return msg.value()

    @exception_capture_decorator(_return=False)
    def _send_encrypted_msg(self, topic: str, key: str, msg: str):
        """
        Send one already-encrypted message; creates the producer on first use.
        :param topic: kafka topic
        :param key: message key
        :param msg: message content
        :return: True on success; False on failure (via the decorator)
        """
        if not isinstance(self.producer, Producer):
            self.producer = Producer(self.producer_cfg)

        try:
            self.producer.produce(topic, key=key, value=msg)
            self.produce_msg_count += 1
            if self.produce_msg_count % self.flush_threshold == 0:  # periodic flush
                self.producer.flush()
                self.produce_msg_count = 0
        except BufferError as err:  # librdkafka's local produce queue is full
            # FIX: log the caught exception instance; the original logged the
            # BufferError class object itself, which carries no detail.
            self.logger.warning(f"topic {topic} kafka buffer error, error: {err}")
            self.producer.flush()
            # FIX: reset the counter before retrying so the recursive call's
            # own increment keeps the bookkeeping consistent (the original set
            # it to 1 *after* the recursion, clobbering the retry's count).
            self.produce_msg_count = 0
            self._send_encrypted_msg(topic, key, msg)

        return True

    def get_msg(self, topics: list, timeout=0.01) -> list:
        """
        Fetch a single message, returning as soon as one is available.
        :return: [topic: str, key: str, value] — value is the decrypted payload
                 or the raw bytes if decryption failed; [None, None, None] when
                 no message arrived within *timeout* or on error.
        """
        return self._get_msg(topics, timeout=timeout)

    @exception_capture_decorator(_return=[None, None, None], extra_msg="UKafka._get_msg")
    def _get_msg(self, topics: list, timeout=0.01) -> list:
        """Implementation of get_msg; creates + subscribes the consumer on first use."""
        if not self.consumer:
            self.logger.debug(f"[UKafka]::consumer has not init, init with config:{self.consumer_cfg} "
                              f"then call poll directly")
            self.consumer = Consumer(self.consumer_cfg)
            self.consumer.subscribe(topics)
        msg = self.consumer.poll(timeout=timeout)

        if not msg:
            return [None, None, None]
        err_msg = msg.error()
        if err_msg:
            self.logger.error(f"[UKafka]::get msg error: error msg:{err_msg}")
            return [None, None, None]

        topic = msg.topic()
        key = msg.key().decode() if msg.key() else ""
        data = self._decrypt_msg(msg)

        return [topic, key, data]

    def set_receive_msg_callback(self, callback, data=None):
        """
        Set the consumer callback used by receive_msg.
        Callback signature: (topic: str, key: str, value, data).
        :param callback: message callback
        :param data: user data forwarded to the callback
        """
        self.receive_msg_callback = callback
        self.receive_msg_callback_data = data

    def receive_msg(self, topics: list):
        """
        Blocking receive loop; call set_receive_msg_callback beforehand — the
        registered callback is invoked for every message.
        :param topics: topics to listen on, e.g. ["topic1", "topic2"]
        """
        return self._receive_msg(topics)

    @exception_capture_decorator(extra_msg="UKafka._receive_msg")
    def _receive_msg(self, topics: list):
        # NOTE: uses a dedicated Consumer, independent of self.consumer.
        consumer = Consumer(self.consumer_cfg)
        consumer.subscribe(topics)

        while True:
            msg = consumer.poll(1.0)
            if msg is None:
                continue
            self._deal_single_msg(msg)

    @exception_capture_decorator(extra_msg="UKafka._deal_single_msg")
    def _deal_single_msg(self, msg):
        """Decode one message and dispatch it to the registered callback."""
        if self.debug_mode:
            self.logger.debug(f"topic: {msg.topic()}, key: {msg.key()}, value: {msg.value()}")

        topic = msg.topic()
        key = msg.key().decode() if msg.key() else ""
        data = self._decrypt_msg(msg)

        if self.receive_msg_callback:
            self.receive_msg_callback(topic, key, data, self.receive_msg_callback_data)


class UKafkaConsumer(UKafka):
    """Counting consumer: polls messages and records throughput statistics
    into a shared count file guarded by a multiprocessing lock."""

    def __init__(self, conn_config: dict, topic, logger=None, res_msg_type: str = "json", count_unit: int = 10000,
                 count_file: str = KAFKA_COUNT_FILE, count_file_lock: Lock = None):
        """
        :param conn_config: see UKafka
        :param topic: topic name (str) or list of topic names
        :param logger: see UKafka
        :param res_msg_type: "json" to ujson-decode payloads; anything else returns raw data
        :param count_unit: commit the count file every *count_unit* messages
        :param count_file: path of the shared count file
        :param count_file_lock: lock protecting the count file; defaults to the module lock
        """
        super().__init__(conn_config, logger)
        self.res_msg_type = res_msg_type
        # Normalize a single topic name into a list.
        self.topic = [topic] if isinstance(topic, str) else topic
        self.count = 0
        self.count_unit = count_unit
        self.count_name = current_process().name.split("__")[0]
        self.count_file = count_file
        self.count_file_lock = count_file_lock if count_file_lock is not None else KAFKA_COUNT_FILE_LOCK

    def poll(self, timeout=None):
        """
        Fetch and decode one message, retrying once on failure.
        :param timeout: poll timeout in seconds (defaults to 0.01)
        :return: decoded payload, or None when no message arrived or both attempts failed
        """
        if not timeout:
            timeout = 0.01
        data = None
        # FIX: the original duplicated the entire fetch/decode body for the
        # retry; a two-attempt loop keeps a single copy with identical behavior
        # (same log messages, `data` still available to the final error log).
        for attempt in (1, 2):
            try:
                _, _, data = self.get_msg(self.topic, timeout=timeout)
                if not data:
                    return None
                self.poll_count()
                return ujson.loads(data) if self.res_msg_type == "json" else data
            except Exception as e:
                if attempt == 1:
                    self.logger.debug(f"[UKafkaConsumer]::recv failed, retry, error: {e}, "
                                      f"error line:{e.__traceback__.tb_lineno}")
                else:
                    self.logger.error(f"[UKafkaConsumer]::recv failed and retry still failed, recv data: {data}, "
                                      f"error: {e}, error line:{e.__traceback__.tb_lineno}")
        return None

    def set_count_name(self, name):
        """
        Set the key used in the count file and refresh its record timestamps.
        :param name: new count key
        """
        self.count_name = name
        with self.count_file_lock:
            data = MsgQueueCount.get_count_content(self.count_file, self.logger)

            consume_data = data.get("consume", dict())
            last_count_info = consume_data.get(self.count_name, None)

            if last_count_info is None or not isinstance(last_count_info, dict):
                # no (valid) prior record: start a fresh one
                consume_data[self.count_name] = MsgQueueCount.get_init_count_info()
            else:
                cur_time = time.time()
                last_count_info["last_record_time"] = cur_time
                last_count_info["last_record_str_time"] = timestamp_2_str(cur_time)
                consume_data[self.count_name] = last_count_info

            data["consume"] = consume_data

            MsgQueueCount.write_count(self.count_file, data)
            self.logger.debug(f"{self.count_name} Consumer count data: {data}")

    def poll_count(self):
        """Count one consumed message; commit every count_unit messages."""
        self.count += 1
        if self.count % self.count_unit == 0:
            self.count = 0
            self.commit_count()

    def commit_count(self):
        """Flush the accumulated consume count into the shared count file."""
        with self.count_file_lock:
            data = MsgQueueCount.get_count_content(self.count_file, self.logger)

            consume_data = data.get("consume", dict())
            last_count_info = consume_data.get(self.count_name, None)
            if last_count_info is None or not isinstance(last_count_info, dict):
                last_count_info = MsgQueueCount.get_init_count_info()
            consume_data[self.count_name] = MsgQueueCount.assembly_count_info(
                last_count_info, count_unit=self.count_unit)
            data["consume"] = consume_data

            MsgQueueCount.write_count(self.count_file, data)
            self.logger.debug(f"{self.count_name} Consumer count data: {data}")


class UKafkaProducer(UKafka):
    """Counting producer: sends messages to a fixed topic and records
    throughput statistics into a shared count file."""

    def __init__(self, conn_config: dict, topic: str, logger=None, count_unit: int = 10000,
                 count_file: str = KAFKA_COUNT_FILE, count_file_lock: Lock = None):
        """
        :param conn_config: see UKafka
        :param topic: topic every send() publishes to
        :param logger: see UKafka
        :param count_unit: commit the count file every *count_unit* messages
        :param count_file: path of the shared count file
        :param count_file_lock: lock protecting the count file; defaults to the module lock
        """
        super().__init__(conn_config, logger)
        self.topic = topic
        self.last_send_time = time.time()
        self.count = 0
        self.count_unit = count_unit
        self.count_name = current_process().name.split("__")[0]
        self.count_file = count_file
        self.count_file_lock = count_file_lock if count_file_lock is not None else KAFKA_COUNT_FILE_LOCK

    def set_count_name(self, name):
        """
        Set the key used in the count file and refresh its record timestamps.
        :param name: new count key
        """
        self.count_name = name
        with self.count_file_lock:
            data = MsgQueueCount.get_count_content(self.count_file, self.logger)

            produce_data = data.get("produce", dict())
            last_count_info = produce_data.get(self.count_name, None)
            if last_count_info is None or not isinstance(last_count_info, dict):
                # no (valid) prior record: start a fresh one
                produce_data[self.count_name] = MsgQueueCount.get_init_count_info()
            else:
                cur_time = time.time()
                last_count_info["last_record_time"] = cur_time
                last_count_info["last_record_str_time"] = timestamp_2_str(cur_time)
                produce_data[self.count_name] = last_count_info

            data["produce"] = produce_data

            MsgQueueCount.write_count(self.count_file, data)
            self.logger.debug(f"{self.count_name} Producer count data: {data}")

    def send_count(self):
        """Count one produced message; commit every count_unit messages."""
        self.count += 1
        if self.count % self.count_unit == 0:
            self.count = 0
            self.commit_count()

    def commit_count(self):
        """Flush the accumulated produce count into the shared count file."""
        with self.count_file_lock:
            data = MsgQueueCount.get_count_content(self.count_file, self.logger)

            produce_data = data.get("produce", dict())
            last_count_info = produce_data.get(self.count_name, None)
            if last_count_info is None or not isinstance(last_count_info, dict):
                last_count_info = MsgQueueCount.get_init_count_info()
            produce_data[self.count_name] = MsgQueueCount.assembly_count_info(
                last_count_info, count_unit=self.count_unit)
            data["produce"] = produce_data

            MsgQueueCount.write_count(self.count_file, data)
            self.logger.debug(f"{self.count_name} Producer count data: {data}")

    def send(self, msg):
        """
        Serialize (dict/list -> json) and send one message, retrying once on failure.
        :param msg: payload; falsy values are dropped immediately
        :return: True on success; False for falsy input or when both attempts failed
        """
        if not msg:
            return False

        # FIX: the original duplicated the serialize/send body for the retry;
        # a two-attempt loop keeps one copy with identical behavior.  The
        # serialization stays inside the try so a ujson.dumps failure is still
        # caught and retried exactly as before (second pass skips re-dumping
        # because msg is already a str).
        for attempt in (1, 2):
            try:
                if isinstance(msg, (dict, list)):
                    msg = ujson.dumps(msg)
                self.send_msg(self.topic, msg)
                self.send_count()
                return True
            except Exception as e:
                if attempt == 1:
                    self.logger.debug(f"[UKafkaProducer]::topic {self.topic} send failed, retry, error: {e}, "
                                      f"error line:{e.__traceback__.tb_lineno}")
                else:
                    self.logger.error(f"[UKafkaProducer]::topic {self.topic} send failed and retry send still failed, send msg: {msg}, "
                                      f"error: {e}, error line:{e.__traceback__.tb_lineno}")
        return False
