package org.example.kafka24;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * Minimal Kafka producer demo: sends 100 string messages to topic "first"
 * and logs the metadata (topic, partition, offset) of each acknowledged record.
 */
public class ProducerDemo {
    private static final Logger logger = LoggerFactory.getLogger(ProducerDemo.class);

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Fixed: property name is "acks" (was misspelled "acs", so the setting was ignored).
        // acks=-1 (all): leader and all in-sync replicas must persist the record before
        // acknowledging; acks=1 would ack as soon as only the leader has persisted it.
        props.put("acks", "-1");
        props.put("bootstrap.servers", "192.168.1.101:9092");

        // try-with-resources guarantees the producer is flushed and closed
        // even if an exception escapes the send loop.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 100; i++) {
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("first", String.format("this is my %d message!", i));
                // Async send; the callback runs on the producer's I/O thread.
                // On failure, metadata is null and e is non-null — the original code
                // would NPE here and lose the real error, so check e first.
                producer.send(record, (RecordMetadata metadata, Exception e) -> {
                    if (e != null) {
                        logger.error("failed to send record", e);
                    } else {
                        logger.info("{},{},{}",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                });
                // To make sends synchronous, capture the returned Future and call get().
            }
        }
    }
}
