package com.wh.springkafka.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class ProducerSampleWasu {

    public static final String TOPIC_NAME = "adi_transform_hubeigd";

    public static void main(String[] args) {
        producerSend();
    }

    /*
        Demonstrates asynchronous sending with a Kafka Producer.
        (The call to Future.get() below makes the demo effectively synchronous.)
     */
    public static void producerSend() {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "125.210.163.28:9092");
        /*
            acks:
                0   : fire-and-forget, at-most-once delivery
                1   : leader acknowledges only, at-least-once delivery
                all : every in-sync replica must acknowledge —
                      slowest option, but the most durable
         */
        properties.setProperty(ProducerConfig.ACKS_CONFIG, "all");
        // Number of retries on transient send failures (0 = no retries).
        properties.setProperty(ProducerConfig.RETRIES_CONFIG, "0");
        // Maximum size of a record batch, in bytes.
        properties.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        // How long (ms) to linger before sending a partially-filled batch.
        properties.setProperty(ProducerConfig.LINGER_MS_CONFIG, "1");
        // Total memory (bytes) the producer may use to buffer unsent records.
        properties.setProperty(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");

        // key serializer
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // value serializer
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // Custom partitioner for load balancing (currently disabled).
//        properties.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG,"com.wh.springkafka.producer.SamplePartition");

        // try-with-resources guarantees the producer (and its network channels)
        // is closed even if record construction or send() throws.
        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
//            for (int i = 0; i < 10; i++) {
            int i = 1;
            // Message payload object.
            ProducerRecord<String, String> record =
                    new ProducerRecord<>(TOPIC_NAME, "key-" + i, "value-" + i);

            Future<RecordMetadata> result = producer.send(record);
            try {
                // Blocking on get() waits for the broker acknowledgement.
                System.out.println(result.get().topic());
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe the interruption.
                Thread.currentThread().interrupt();
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
//            }
        }
    }
}
