package com.tiantian.kafka.learn_06_producer_ack;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import static com.tiantian.kafka.Constants.PRODUCER_ACK_TOPIC;

/**
 * Demonstrates the producer {@code acks} durability setting:
 * <ul>
 *   <li>{@code acks=0}  — do not wait for any broker acknowledgement</li>
 *   <li>{@code acks=1}  — wait for the partition leader's acknowledgement only</li>
 *   <li>{@code acks=all} (or {@code -1}) — wait for the leader and all in-sync followers</li>
 * </ul>
 * Note: leader/follower are per-partition concepts — reads and writes go through the
 * partition leader; followers only replicate the data.
 *
 * @author tant
 * @since 2025/1/23 16:59
 */
public class KafkaProducerTestAcks {

    /**
     * Sends a sequence of messages synchronously (blocking on each ack) with
     * {@code acks=all}, printing the returned metadata for every record.
     *
     * @throws ExecutionException   if a send fails on the broker side
     * @throws InterruptedException if interrupted while waiting for an ack or sleeping
     */
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // 1. Connection + serialization configuration
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.217.132:9092");
        // Strongest durability: wait for leader AND all in-sync replicas
        props.setProperty(ProducerConfig.ACKS_CONFIG, "all");
        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Message counter range for the generated payloads
        int start = 10000 * 100;
        int end = 10000 * 100 * 10;

        // 2. try-with-resources guarantees the producer is closed (flushing buffers,
        //    releasing network resources) even if send/get/sleep throws — the original
        //    unconditional close() at the end leaked the producer on any exception.
        try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props)) {
            // 3. Send messages to the topic, blocking on each acknowledgement
            for (int i = start; i < end; i++) {
                ProducerRecord<String, String> record =
                        new ProducerRecord<>(PRODUCER_ACK_TOPIC, null, "hello-kafka-" + i);
                // Synchronous send: block until the broker-side ack arrives
                Future<RecordMetadata> future = kafkaProducer.send(record);
                RecordMetadata metadata = future.get();
                System.out.printf("消息发送成功: topic: %s partition: %d offset: %d key: %s value: %s%n", metadata.topic(), metadata.partition(), metadata.offset(), record.key(), record.value());
                // Throttle to one message per second so the demo output is readable
                Thread.sleep(1000);
            }
        }
    }
}

    