package com.yang965.producer.kafka.manual;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * Kafka producer demo using the official client API.
 *
 * <p>Sends a small batch of messages to {@link #topic}, demonstrating both the
 * synchronous (blocking on the send future) and asynchronous (callback) send styles.
 */
public class KafkaProducerDemo {

    public static final String bootstrapServers = "192.168.137.31:9092,192.168.137.32:9092,192.168.137.33:9092";
    public static final String topic = "topic-demo";
    public static final String clientId = "producer-client-demo-1";

    /**
     * Builds the producer configuration.
     *
     * @return the properties used to construct a {@link KafkaProducer}
     */
    public static Properties initConfig() {
        Properties properties = new Properties();
        // Kafka cluster broker addresses
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Key serializer
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Value serializer
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Client id; auto-generated (producer-1, producer-2, ...) when unset
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
        // acks (string type, default "1"):
        //  "1"  - compromise between reliability and throughput: server responds once
        //         the leader replica has written the record
        //  "0"  - maximum throughput: producer does not wait for any server response
        //  "-1" - strongest reliability: waits until every replica in the ISR has written
        properties.put(ProducerConfig.ACKS_CONFIG, "-1");
        // retries (default "0"): covers retriable exceptions such as transient network
        // errors and leader elections.
        // retry.backoff.ms controls the interval between retries.
        // To preserve message ordering, set max.in.flight.requests.per.connection to "1"
        // (max.in.flight.requests.per.connection = max in-flight requests per connection).
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        // Custom interceptors: several may be configured to form a chain executed in
        // order. If one interceptor fails, the next one continues from the output of the
        // last interceptor that succeeded.
        properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, CustomInterceptor.class.getName());
        // compression.type: none, gzip, snappy, lz4. Compression greatly reduces network
        // traffic but is not recommended when latency is a concern.
        return properties;
    }

    public static void main(String[] args) {
        Properties properties = initConfig();
        // KafkaProducer is thread-safe. try-with-resources guarantees close() runs even
        // if a send throws, so the producer never leaks sockets or buffered records.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 20; i++) {
                // With an explicit key, messages sharing a key land on the same partition:
                // ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, "key-test", "hello, kafka");
                ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, "hello, kafka");
                // sync(producer, producerRecord);
                async(producer, producerRecord);
            }
            // No sleep needed: the implicit close() at the end of this block waits for
            // all in-flight sends to complete, so every async callback fires before exit.
        }
    }

    /**
     * Synchronous send: blocks on the returned future until the broker acknowledges.
     *
     * @param producer       shared, thread-safe producer instance
     * @param producerRecord record to send
     */
    private static void sync(KafkaProducer<String, String> producer, ProducerRecord<String, String> producerRecord) {
        // Fire-and-forget variant (no delivery confirmation):
        // producer.send(producerRecord);
        try {
            RecordMetadata recordMetadata = producer.send(producerRecord).get();
            System.out.println("发送" + recordMetadata.toString());
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
    }

    /**
     * Asynchronous send with a callback; callbacks are invoked in order per partition.
     *
     * @param producer       shared, thread-safe producer instance
     * @param producerRecord record to send
     */
    private static void async(KafkaProducer<String, String> producer, ProducerRecord<String, String> producerRecord) {
        producer.send(producerRecord, (recordMetadata, e) -> {
            if (e != null) {
                e.printStackTrace();
            } else {
                System.out.println(recordMetadata.toString());
            }
        });
    }

}
