import csv
import time
from confluent_kafka import Producer

# Kafka producer configuration.
kafka_config = {
    'bootstrap.servers': '1.94.147.80:9092',  # replace with your Kafka broker address
    'client.id': 'your-client-id'  # optional: client identifier shown in broker logs
}

# Create the Kafka producer instance (module-level; shared by the functions below).
producer = Producer(kafka_config)

# Kafka topic name.
topic = 'test'  # replace with your actual Kafka topic name

# Path to the input CSV file.
csv_file_path = 'data.csv'  # replace with your CSV file path

# Delay between consecutive messages (seconds).
send_interval = 0.1  # adjust as needed to control the send rate


def delivery_report(err, msg):
    """Per-message delivery callback for the Kafka producer.

    Invoked once per produced message by poll() or flush() with the
    delivery outcome: *err* is None on success, otherwise the failure.
    """
    if err is None:
        print(f'Message delivered to {msg.topic()} [{msg.partition()}] at offset {msg.offset()} value {msg.value()}')
    else:
        print(f'Message delivery failed: {err}')


def send_to_kafka(line):
    """Join the fields of one CSV row with ':' and produce it to Kafka.

    Args:
        line: sequence of string fields from one CSV row.

    The message is UTF-8 encoded and sent asynchronously; delivery_report
    is registered as the per-message callback so delivery results are
    reported (previously it was defined but never wired up).
    """
    # Insert ':' between adjacent fields.
    message = ':'.join(line)
    # Asynchronously enqueue the message; delivery_report fires once the
    # broker acks (or the send fails).
    producer.produce(topic, value=message.encode('utf-8'), callback=delivery_report)
    # poll(0) does NOT wait for delivery; it only serves queued delivery
    # callbacks and keeps the producer's internal queue from filling up.
    producer.poll(0)


def main():
    """Endlessly replay the CSV file, producing one Kafka message per row.

    Each pass reads the whole file, sends every non-empty row (last field
    dropped), sleeps send_interval between rows, then flushes the producer
    before starting over. Runs until interrupted.
    """
    while True:
        # newline='' is required by the csv module so it can handle
        # embedded newlines/quoting itself; encoding pinned for portability.
        with open(csv_file_path, mode='r', newline='', encoding='utf-8') as file:
            csv_reader = csv.reader(file)
            for row in csv_reader:
                # Blank lines yield empty rows; skip them instead of
                # producing empty messages.
                if not row:
                    continue
                # Send all but the last field — presumably the trailing
                # column is intentionally excluded; TODO confirm.
                send_to_kafka(row[0:-1])
                # Throttle the send rate.
                time.sleep(send_interval)

        # Block until every queued message has been delivered (or failed)
        # before replaying the file.
        producer.flush()
        print("loop")


# Run the replay loop only when executed as a script, not on import.
if __name__ == "__main__":
    main()
