from confluent_kafka import Producer
from confluent_kafka import KafkaException
from utils.logger import logger  # ✅ 引入封装的 logger
import socket
import config
import threading
import time

class KafkaProducerWrapper:
    """Thin wrapper around ``confluent_kafka.Producer``.

    Provides:
      * ``send``       – queue one message with retry-on-full-buffer.
      * ``send_batch`` – sequential batch send followed by a flush.
      * ``send_async`` – multi-threaded batch send followed by a flush.

    Note: ``confluent_kafka.Producer`` is thread-safe, so several worker
    threads may share ``self.producer``.
    """

    def __init__(self):
        # client.id falls back to this host's IP when config.Producer_ID is
        # unset/empty (the `or` treats "" / None as missing).
        self.producer = Producer({
            'bootstrap.servers': config.KAFKA_BROKER,
            'client.id': config.Producer_ID or socket.gethostbyname(socket.gethostname())
        })
        logger.info("✅ Kafka Producer 初始化完成")

    def delivery_report(self, err, msg):
        """Per-message delivery callback, invoked from poll()/flush().

        :param err: KafkaError on failure, None on success.
        :param msg: the Message object that was (not) delivered.
        """
        if err is not None:
            logger.error(f"❌ Failed to deliver message: {msg.value()}: {err}")
        else:
            logger.success(f"✅ Message delivered: {msg.value()}")

    def send(self, value: str, topic: str = None, retries: int = 3):
        """Queue a single UTF-8 encoded message (asynchronous produce).

        :param value:   message payload (encoded as UTF-8).
        :param topic:   target topic; defaults to config.TOPIC_NAME.
        :param retries: attempts when the local producer queue is full.
        """
        topic = topic or config.TOPIC_NAME
        for attempt in range(retries):
            try:
                # Asynchronous: produce() only enqueues; delivery_report is
                # fired later from poll()/flush().
                self.producer.produce(
                    topic=topic,
                    value=value.encode("utf-8"),
                    callback=self.delivery_report
                )
                self.producer.poll(0)  # non-blocking: serve pending delivery callbacks
                logger.debug(f"📤 Message queued to topic={topic}")
                return  # queued successfully — stop retrying
            except BufferError as e:
                logger.warning(f"⚠️ Attempt {attempt + 1}: Buffer full, retrying... ({e})")
                # poll(0.5) both waits AND serves delivery callbacks, which is
                # what actually frees queue slots; a bare time.sleep() would
                # wait without draining the queue.
                self.producer.poll(0.5)
            except KafkaException as e:
                logger.error(f"❌ KafkaException during produce: {e}")
                break
            except Exception as e:
                logger.exception(f"❗ Unexpected error: {e}")
                break
        else:
            # for/else: reached only when every retry hit BufferError.
            logger.critical(f"🚨 Failed to send message after {retries} retries.")

    def flush(self):
        """Block until all queued/in-flight messages are delivered.

        Should be called before shutting down the producer so that no
        outstanding message is lost.
        """
        logger.debug("🔄 Flushing messages...")
        self.producer.flush()
        logger.debug("✅ Flush complete")

    def send_batch(self, messages, topic: str = None):
        """Send a batch of messages sequentially, then flush.

        :param messages: iterable of string payloads.
        :param topic:    target topic; defaults to config.TOPIC_NAME.
        """
        logger.info(f"📦 Sending batch of {len(messages)} messages...")
        for msg in messages:
            self.send(msg, topic)
        # Producing is asynchronous; flush() forces everything out.
        self.flush()

    def _send_all(self, batch):
        """Worker body for send_async: queue every message in *batch*."""
        for msg in batch:
            self.send(msg)

    def send_async(self, messages, thread_count=4):
        """Send messages concurrently from up to ``thread_count`` threads.

        Messages are split as evenly as possible (batch sizes differ by at
        most one); no more threads than messages are spawned. A single
        flush() after all workers join guarantees delivery of everything.

        :param messages:     sequence of string payloads.
        :param thread_count: maximum number of worker threads.
        """
        total = len(messages)
        logger.info(f"🚀 Sending {total} messages asynchronously with {thread_count} threads.")

        if total == 0:
            # Nothing to send; still flush (mirrors the per-worker flush of
            # a no-op batch) and report completion.
            self.flush()
            logger.info("✅ All async threads completed.")
            return

        # Never spawn more threads than messages (avoids idle "ghost"
        # threads), and spread the remainder so sizes differ by at most one
        # instead of dumping it all on the last thread.
        workers = min(thread_count, total)
        base, extra = divmod(total, workers)

        threads = []
        start = 0
        for i in range(workers):
            end = start + base + (1 if i < extra else 0)
            t = threading.Thread(target=self._send_all, args=(messages[start:end],))
            t.start()
            threads.append(t)
            start = end

        # Block the main thread until every worker finishes queueing.
        for t in threads:
            t.join()

        # flush() waits for ALL in-flight messages, so one call here replaces
        # the redundant per-thread flushes (each of which blocked on every
        # other thread's messages too).
        self.flush()

        logger.info("✅ All async threads completed.")

