package com.hxq.demo6;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.KafkaAdminClient;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.config.SaslConfigs;

import java.util.HashMap;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * @author hxq
 * @date 2022/4/14 17:39
 */
//@Slf4j
public class SASLProducer {

    public static void main(String[] args) throws InterruptedException, ExecutionException {

        haveCallback();

    }


    /**
     * Sends one record to the {@code delay-minutes-1} topic and reports the result
     * through an asynchronous send callback.
     *
     * <p>The callback is invoked when the producer receives the broker's ack: a
     * {@code null} Exception means the send succeeded (partition/offset printed),
     * a non-null Exception means it failed. Failed sends are retried automatically
     * by the client (see the {@code retries} setting), so no manual retry is needed
     * inside the callback.
     */
    private static void haveCallback(){

        Properties properties = setConfigInfo();
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

        // JSON payload; presumably consumed by a delay-queue forwarder where
        // "delay" is in minutes — TODO confirm against the consumer side.
        String msg = "{\"topic\": \"test\",\"key\": \"key1\",\"value\": \"value2\",\"delay\": \"2\"}";

        ProducerRecord<String, String> record = new ProducerRecord<>("delay-minutes-1",
                    "key1", msg);

        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                if (e==null){
                    System.out.println("分区："+recordMetadata.partition()+"=====" + "游标："+recordMetadata.offset());
                }else {
                    e.printStackTrace();
                }

            }
        });

        // Flush the pending send (this also drives the callback to completion),
        // then release the producer's network/IO threads. This replaces the
        // previous `while (true) {}` busy-spin, which pegged a CPU core, leaked
        // the producer, and prevented the JVM from ever exiting.
        producer.flush();
        producer.close();

    }


    /**
     * Builds producer configuration for a SASL_PLAINTEXT / SCRAM-SHA-512 secured cluster.
     *
     * @return producer {@link Properties} with bootstrap servers, durability,
     *         serializers, and SASL authentication settings
     */
    private static Properties setConfigInfo(){
        Properties properties = new Properties();
        // Kafka bootstrap broker(s)
        properties.put("bootstrap.servers","VM-12-12-ubuntu:9092");
        // Wait for the full in-sync replica set to acknowledge each record.
        // BUG FIX: the config key is "acks" — the original "ack" was an unknown
        // key and silently ignored, leaving the default ack mode in effect.
        properties.put("acks","all");
        // Automatic retry count for transient send failures
        properties.put("retries",1);
        // RecordAccumulator buffer size in bytes (32 MB)
        properties.put("buffer.memory", 33554432);

        properties.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");

        // SASL connection settings: plaintext transport with SCRAM-SHA-512 auth.
        properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        properties.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
        // Option 1: inline JAAS config.
        // NOTE(review): credentials are hard-coded here — move them to external
        // configuration or a secrets store before production use.
        properties.put("sasl.jaas.config","org.apache.kafka.common.security.scram.ScramLoginModule required username=\"writer\" password=\"123456\";");
        // Option 2: external JAAS file via system property:
        // System.setProperty("java.security.auth.login.config","kafka_client_jaas.conf");

        return properties;

    }
}
