# kafka_producer.py
import json
import time
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from kafka import KafkaProducer
import random


class KafkaParallelProducer:
    """Produce JSON sample messages to multiple Kafka topics in parallel.

    One worker thread per topic (bounded by ``max_workers``) sends messages
    asynchronously through a single shared ``KafkaProducer``, then waits on
    each send future to confirm delivery.
    """

    def __init__(self, bootstrap_servers, topics, max_workers=4):
        """Initialize the parallel Kafka producer.

        Args:
            bootstrap_servers: List of Kafka broker addresses ("host:port").
            topics: List of topic names to write to.
            max_workers: Maximum number of concurrent worker threads.
        """
        self.bootstrap_servers = bootstrap_servers
        self.topics = topics
        self.max_workers = max_workers

        # Single shared producer; kafka-python's KafkaProducer is
        # thread-safe, so all worker threads reuse this one instance.
        self.producer = KafkaProducer(
            bootstrap_servers=bootstrap_servers,
            value_serializer=lambda v: json.dumps(v).encode('utf-8'),
            acks='all',        # wait for all in-sync replicas to acknowledge
            retries=3,         # retry transient send failures
            batch_size=16384,  # per-partition batch size in bytes
            linger_ms=10       # brief delay before sending to improve batching
        )

    def generate_sample_data(self, topic):
        """Build one randomized sample record for the given topic.

        Args:
            topic: Topic name; 'bfd' and 'padna' get topic-specific schemas,
                any other name gets a generic placeholder record.

        Returns:
            dict: A JSON-serializable sample message.
        """
        if topic == 'bfd':
            # BFD session status sample.
            return {
                "timestamp": int(time.time() * 1000),
                "device_id": f"device_{random.randint(1, 100)}",
                "bfd_status": random.choice(["up", "down", "init"]),
                "session_id": f"session_{random.randint(1000, 9999)}",
                "detection_time": random.randint(100, 1000)
            }
        if topic == 'padna':
            # Network-flow style sample.
            return {
                "timestamp": int(time.time() * 1000),
                "source_ip": f"192.168.{random.randint(1, 255)}.{random.randint(1, 255)}",
                "destination_ip": f"10.0.{random.randint(1, 255)}.{random.randint(1, 255)}",
                "packet_count": random.randint(100, 10000),
                "bytes_transferred": random.randint(1024, 1024 * 1024),
                "protocol": random.choice(["TCP", "UDP", "ICMP"])
            }
        # Fallback for any unrecognized topic.
        return {"message": f"Sample data for {topic}", "timestamp": time.time()}

    def send_message(self, topic, message):
        """Asynchronously send one message to the given topic.

        Args:
            topic: Destination topic name.
            message: JSON-serializable message payload.

        Returns:
            The kafka-python send future on success, or None if the send
            call itself raised (e.g. serialization failure or full buffer).
        """
        try:
            return self.producer.send(topic, value=message)
        except Exception as e:
            # Best-effort: report and keep producing the remaining messages.
            print(f"Error sending message to topic {topic}: {e}")
            return None

    def produce_messages(self, topic, count):
        """Produce `count` sample messages to one topic and confirm delivery.

        Args:
            topic: Destination topic name.
            count: Number of messages to send.
        """
        print(f"Starting to produce {count} messages to topic '{topic}'")

        # (future, original index) pairs for later delivery confirmation.
        # Note: the topic is NOT stored per entry (and the confirmation loop
        # below no longer shadows the `topic` parameter) — every entry in
        # this list belongs to the same topic.
        pending = []
        for i in range(count):
            data = self.generate_sample_data(topic)

            # Asynchronous send; failures at call time yield None.
            future = self.send_message(topic, data)
            if future is not None:
                pending.append((future, i))

            # Throttle briefly every 100 messages to avoid flooding the
            # producer's in-memory buffer.
            if i > 0 and i % 100 == 0:
                time.sleep(0.01)

        # Block on each future to confirm delivery (or report failure).
        success_count = 0
        for future, index in pending:
            try:
                record_metadata = future.get(timeout=10)
                success_count += 1
                if index % 1000 == 0:
                    print(f"Sent message {index} to topic '{topic}' "
                          f"partition {record_metadata.partition} offset {record_metadata.offset}")
            except Exception as e:
                print(f"Failed to send message {index} to topic '{topic}': {e}")

        print(f"Finished producing messages to topic '{topic}'. Success: {success_count}/{count}")

    def parallel_produce(self, messages_per_topic=1000):
        """Produce to all configured topics concurrently.

        Args:
            messages_per_topic: Number of messages to send per topic.
        """
        print(f"Starting parallel production to topics: {self.topics}")

        # One produce_messages task per topic, capped at max_workers threads.
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_topic = {
                executor.submit(self.produce_messages, topic, messages_per_topic): topic
                for topic in self.topics
            }

            # Surface per-topic results/failures as each task finishes.
            for future in as_completed(future_to_topic):
                topic = future_to_topic[future]
                try:
                    future.result()
                    print(f"Completed production to topic '{topic}'")
                except Exception as e:
                    print(f"Error producing to topic '{topic}': {e}")

    def close(self):
        """Flush any buffered messages and close the producer.

        Safe to call more than once: after the first close the producer
        reference is cleared and subsequent calls are no-ops.
        """
        if self.producer:
            self.producer.flush()
            self.producer.close()
            # Mark as closed so a second close() does nothing.
            self.producer = None


def main():
    """Entry point: create the parallel producer, push sample messages to
    every configured topic, and always release the Kafka connection."""
    # Kafka broker address (default port 9092) and destination topics.
    servers = ['172.18.31.159:9092']
    target_topics = ['bfd', 'padna']

    parallel_producer = KafkaParallelProducer(
        bootstrap_servers=servers,
        topics=target_topics,
        max_workers=4,
    )

    try:
        # Fan out production across all topics concurrently.
        parallel_producer.parallel_produce(messages_per_topic=1000)
    finally:
        # Flush and close even if production raised.
        parallel_producer.close()


if __name__ == "__main__":
    main()
