package com.ysw.java_api;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;

/**
 * Kafka producer examples.
 *
 * <p>Notes:
 * <ol>
 *   <li>Kafka's {@code send()} is asynchronous by default.</li>
 *   <li>{@code send()} returns a {@link Future}, so callers can block on
 *       {@code Future.get()} for synchronous semantics (rarely used).</li>
 * </ol>
 *
 * @author ysw
 * @since 2021/12/12
 */
public class MyProducer {
    /** Target topic for every demo method. */
    private static final String TOPIC = "first";
    /** Number of records each demo method sends. */
    private static final int MESSAGE_COUNT = 10;

    public static void main(String[] args) throws InterruptedException {
        // Producer configuration.
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "192.168.37.11:9092");
        // ack level "1": leader acknowledgement only
        properties.put("acks", "1");
        properties.put("retries", 2);
        // batch size: 16 KB
        properties.put("batch.size", 16384);
        // wait up to 1 ms for a batch to fill before sending
        properties.put("linger.ms", 1);
        // total send buffer: 32 MB
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Custom partitioner; usually unnecessary.
        //properties.put("partitioner.class","com.ysw.java_api.partioner.MyPartioner");

        // KafkaProducer is AutoCloseable: try-with-resources guarantees the
        // producer is closed (and buffered records flushed) on exit.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            // sendCallBack(producer);
            // simpleSend(producer);
            pointPartitionSend(producer);
            /*
             * ProducerRecord has several constructors:
             * 1. key given, no partition: partition chosen by hash(key) % partitionCount
             * 2. neither key nor partition: round-robin across partitions
             * 3. partition given: record goes to that exact partition
             */
        }
    }

    /**
     * Asynchronous send with a completion callback.
     *
     * <p>A {@link CountDownLatch} keeps the method alive until every callback
     * has fired, so results are observed before the caller returns.
     *
     * @param producer producer to send with; not closed by this method
     */
    public static void sendCallBack(KafkaProducer<String, String> producer) {
        CountDownLatch countDownLatch = new CountDownLatch(MESSAGE_COUNT);
        for (int i = 0; i < MESSAGE_COUNT; i++) {
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "test" + i);
            producer.send(record, (metadata, exe) -> {
                if (null == exe) {
                    System.out.println(metadata.topic() + "---" + metadata.offset());
                } else {
                    // Surface send failures instead of silently dropping them.
                    exe.printStackTrace();
                }
                countDownLatch.countDown();
            });
        }
        try {
            countDownLatch.await();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /**
     * Plain asynchronous send.
     *
     * <p>{@code send()} returns a {@link Future}; calling {@code get()} on it
     * would block and turn this into a synchronous send, exposing any send
     * exception per record.
     *
     * @param producer producer to send with; not closed by this method
     */
    public static void simpleSend(KafkaProducer<String, String> producer) {
        for (int i = 0; i < MESSAGE_COUNT; i++) {
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "test" + i);
            Future send = producer.send(record);
            // Block here via send.get() for synchronous, per-record error handling.
        }
    }

    /**
     * Send to an explicit partition (partition 0, key "p1").
     *
     * @param producer producer to send with; not closed by this method
     * @throws InterruptedException if interrupted while awaiting callbacks
     */
    public static void pointPartitionSend(KafkaProducer<String, String> producer)
            throws InterruptedException {
        CountDownLatch countDownLatch = new CountDownLatch(MESSAGE_COUNT);
        for (int i = 0; i < MESSAGE_COUNT; i++) {
            // Explicit partition 0 with key "p1".
            ProducerRecord<String, String> record =
                    new ProducerRecord<>(TOPIC, 0, "p1", "test" + i);
            producer.send(record, (metadata, e) -> {
                // Guard against failed sends: metadata must not be dereferenced
                // blindly when the callback reports an exception.
                if (null == e) {
                    System.out.println(metadata.topic() + "---" + metadata.offset()
                            + "--" + metadata.partition());
                } else {
                    e.printStackTrace();
                }
                countDownLatch.countDown();
            });
        }
        countDownLatch.await();
    }
}