package com.peng.kafka;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * Demo producer that synchronously publishes 100 keyed string messages to a
 * Kafka topic, printing the destination topic/partition of each ack'd record.
 *
 * @author: lupeng6
 * @create: 2023/3/18 13:54
 */
public class MyKafkaProducer {

    private static final String TOPIC_NAME = "sample2";

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties config = buildProducerConfig();

        // try-with-resources guarantees the producer is flushed and closed.
        try (Producer<String, String> producer = new KafkaProducer<>(config)) {
            for (int seq = 0; seq < 100; seq++) {
                // String key => partition chosen by key hash, so identical keys land together.
                ProducerRecord<String, String> message =
                        new ProducerRecord<>(TOPIC_NAME, "key" + seq, "value" + seq);
                // Synchronous send: get() blocks until the broker acknowledges the record.
                RecordMetadata meta = producer.send(message).get();
                System.out.printf("同步发送结果topic: %s partition: %s%n", meta.topic(), meta.partition());
                // Deliberate pacing between messages for demo observability.
                Thread.sleep(3000);
            }
        }
    }

    /**
     * Assembles the producer configuration: broker list, serializers,
     * ack/retry policy, and batching parameters.
     *
     * @return a fully populated {@link Properties} for {@link KafkaProducer}
     */
    private static Properties buildProducerConfig() {
        Properties config = new Properties();
        // Bootstrap broker list for the Kafka cluster.
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.14:9092,192.168.1.14:9093,192.168.1.14:9094");
        // Key and value are both plain strings.
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // acks=1: leader acknowledgement only (options: 0, 1, -1/all).
        config.put(ProducerConfig.ACKS_CONFIG, "1");
        // Retry on failed sends; improves reliability but may duplicate messages
        // if an ack is lost after the broker already persisted the record.
        config.put(ProducerConfig.RETRIES_CONFIG, 3);
        // Wait 300 ms between retry attempts.
        config.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 300);
        // 32 MiB client-side buffer; records are staged here before sending.
        config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // Sender thread drains the buffer in batches of up to 16 KiB per partition.
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // If a batch hasn't filled within 10 ms, send whatever has accumulated.
        config.put(ProducerConfig.LINGER_MS_CONFIG, 10);
        return config;
    }
}
