package com.raos.kafka.commit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

/**
 * Kafka getting-started: manual commit acknowledgment -- producer side
 *
 * @author raos
 * @email 991207823@qq.com
 * @date 2021/6/19 21:45
 */
public class CommitProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Standalone vs. cluster connection -- pick exactly one of the two lines below
//        props.put("bootstrap.servers","192.168.235.102:9092");
        props.put("bootstrap.servers", "192.168.235.102:9093,192.168.235.102:9094,192.168.235.102:9095");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // acks: 0 = ack as soon as the request is sent | 1 = ack once the leader has written
        // the record | all = ack only after all in-sync followers have replicated it
        props.put("acks", "1");
        // Number of automatic retries on transient send failures
        props.put("retries", 3);
        // Batch size in bytes before a send is triggered (default 16 KB)
        props.put("batch.size", 16384);
        // Max time (ms) to wait for a batch to fill before sending anyway
        props.put("linger.ms", 5);
        // Client-side buffer size (default 32 MB); a full buffer also triggers sending
        props.put("buffer.memory", 33554432);
        // Max time (ms) the producer blocks fetching metadata before throwing
        props.put("max.block.ms", 3000);

        // try-with-resources guarantees the producer is flushed and closed even if a
        // send throws -- the original code leaked the producer on any exception.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 100; i++) {
                // The returned Future was previously ignored, so broker-side failures
                // were silently lost; the callback surfaces them instead.
                producer.send(
                        new ProducerRecord<>("commit-test", Integer.toString(i), Integer.toString(i)),
                        (metadata, exception) -> {
                            if (exception != null) {
                                System.err.println("Failed to send record: " + exception);
                            }
                        });
            }
        }
    }

}
