package com.monkeyboy.demo.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.Future;

/**
 * Kafka producer usage examples: asynchronous, synchronous, callback-based,
 * custom-partitioner, SSL, and transactional sends.
 *
 * @author Gavin
 * @date 2020.08.24 09:53
 */
public class ProducerSample {
    /** Target topic shared by every demo method. */
    public final static String TOPIC_NAME = "first";

    public static void main(String[] args) throws Exception {
        // Asynchronous (fire-and-forget) send
//        producerSendAsync();
        // Synchronous (blocking) send
//        producerSendSync();
        // Asynchronous send with a completion callback
//        producerSendAsyncCallBack();
        // Asynchronous send with a callback and custom partition load balancing
//        producerSendAsyncCallBackWithPartition();

        // Send over an SSL-secured listener
//        producerSendAsyncWithSSL();

        // Transactional send
        transaction();
    }

    /*
     * Transactional producer demo: publishes 100 records atomically within a
     * single Kafka transaction. Consumers configured with
     * isolation.level=read_committed never observe a partially committed batch.
     */
    public static void transaction() {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.150:9092");
        // Wait for the full in-sync replica set to acknowledge
        // (acks=all is required when idempotence/transactions are enabled)
        properties.put(ProducerConfig.ACKS_CONFIG, "all");

        // A batch is flushed once it reaches 16 KB ...
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        // ... or once 1 ms has elapsed, whichever happens first
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        // 32 MB producer-side record buffer
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");

        // Key/value serializers
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // The three settings required to enable transactions
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "first-transactional"); // unique transactional id
        properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        properties.put(ProducerConfig.RETRIES_CONFIG, "1");

        Producer<String, String> producer = new KafkaProducer<>(properties);
        producer.initTransactions();
        try {
            producer.beginTransaction();
            for (int i = 0; i < 100; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME, "key:" + i, "value:" + i);
                producer.send(record);
            }
            System.out.println("睡眠一秒钟再提交事务");
            Thread.sleep(1000);
            producer.commitTransaction();
        } catch (Exception e) {
            if (e instanceof InterruptedException) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
            }
            e.printStackTrace();
            producer.abortTransaction(); // discard the uncommitted records
        } finally {
            // BUG FIX: close() previously ran only on the success path inside
            // try{}, leaking the producer's I/O thread and sockets on failure.
            producer.close();
        }
    }

    /*
     * Async send over an SSL-secured listener. Requires a client truststore
     * containing the broker's certificate chain.
     */
    public static void producerSendAsyncWithSSL() throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.150:8989");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // SSL configuration
        properties.put("security.protocol", "SSL");
        properties.put("ssl.endpoint.identification.algorithm", ""); // disable hostname verification (demo only)
        // SECURITY NOTE: truststore path and password are hard-coded for this
        // demo; load them from configuration/environment in production code.
        properties.put("ssl.truststore.location", "F:\\idea_workspace\\springboot_stack-master\\myproject-kafka\\client.truststore.jks");
        properties.put("ssl.truststore.password", "123456");

        Producer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 100; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME, "key:" + i, "value:" + i);
                producer.send(record);
            }
        } finally {
            // close() flushes whatever is still buffered by batch.size/linger.ms
            producer.close();
        }
    }

    /*
     * Async send with a completion callback and a custom Partitioner
     * (SamplePartition) handling partition load balancing.
     */
    public static void producerSendAsyncCallBackWithPartition() throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.150:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        // Register the custom Partitioner implementation
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.monkeyboy.demo.producer.SamplePartition");

        Producer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 10; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME, "key:" + i, "value:" + i);
                producer.send(record, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                        // BUG FIX: on failure the metadata argument is null and the
                        // exception is non-null; dereferencing metadata blindly threw NPE.
                        if (e != null) {
                            e.printStackTrace();
                        } else {
                            System.out.println("partition: " + recordMetadata.partition() + ",offset: " + recordMetadata.offset());
                        }
                    }
                });
            }
        } finally {
            producer.close();
        }
    }


    /*
     * Async send with a completion callback reporting partition and offset.
     */
    public static void producerSendAsyncCallBack() throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.150:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 10; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME, "key:" + i, "value:" + i);
                producer.send(record, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                        // BUG FIX: metadata is null when the send failed (e != null);
                        // the original threw NPE inside the callback on failure.
                        if (e != null) {
                            e.printStackTrace();
                        } else {
                            System.out.println("partition: " + recordMetadata.partition() + ",offset: " + recordMetadata.offset());
                        }
                    }
                });
            }
        } finally {
            producer.close();
        }
    }

    /*
     * Synchronous (blocking) send demo.
     * (The original comment mislabelled this as asynchronous.)
     */
    public static void producerSendSync() throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.150:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 10; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME, "key:" + i, "value:" + i);
                Future<RecordMetadata> send = producer.send(record);
                // Future.get() blocks until the broker acknowledges — this is the
                // only difference from the asynchronous variants below.
                RecordMetadata recordMetadata = send.get();
                System.out.println("partition: " + recordMetadata.partition() + ",offset: " + recordMetadata.offset());
            }
        } finally {
            producer.close();
        }
    }

    /*
     * Asynchronous (fire-and-forget) send demo.
     */
    public static void producerSendAsync() throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.150:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");

        // A batch is flushed once it reaches 16 KB ...
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        // ... or once 1 ms has elapsed, whichever happens first
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        // 32 MB producer-side record buffer
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");

        // Key/value serializers
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 100; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME, "key:" + i, "value:" + i);
                producer.send(record);
            }
            // Give the background sender a moment; close() below also flushes
            // any records still buffered by batch.size/linger.ms.
            Thread.sleep(1000);
        } finally {
            producer.close();
        }
    }
}
