package org.niit.kafka;

/*
  Demonstrates tuning producer configuration parameters to improve
  producer efficiency and thereby increase producer throughput.
 */

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * Demo of Kafka producer throughput tuning: buffer memory, batch size,
 * linger time, compression and retries.
 *
 * <p>Sends 99999 string records to topic {@code BD1} asynchronously; each
 * record's broker acknowledgement (or failure) is reported from the send
 * callback. Requires a broker reachable at {@code node1:9092}.
 */
public class KafkaProducerParameters {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // 1. Producer connection / behavior configuration.
        Properties pros = new Properties();
        pros.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092");
        // acks: "all" (equivalent to -1) waits for all in-sync replicas — safest,
        // use for critical data (e.g. money-related); "1" waits for the leader
        // only — acceptable for logs / ordinary data.
        pros.put(ProducerConfig.ACKS_CONFIG, "all");
        pros.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        pros.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Record-accumulator buffer in bytes. The default is 32 MB (not 32 KB);
        // doubled here to 64 MB.
        pros.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 32 * 1024 * 1024 * 2);
        // Batch size in bytes: default 16 KB, raised to 160 KB so more records
        // are grouped into one request.
        pros.put(ProducerConfig.BATCH_SIZE_CONFIG, 16 * 1024 * 10);
        // How long (ms) to wait for a batch to fill before sending anyway.
        pros.put(ProducerConfig.LINGER_MS_CONFIG, 1000 * 2);
        // Compress batches to reduce network traffic (broker must support snappy).
        pros.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        // Retries on transient send failures.
        pros.put(ProducerConfig.RETRIES_CONFIG, 3);

        // 2. Create the producer. try-with-resources guarantees close() — which
        // flushes any buffered records — even if a send throws, fixing the
        // resource leak in the original straight-line close().
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(pros)) {

            // 3. Send data asynchronously. The original blocked on future.get()
            // and slept 1 s per record, which serializes sends and defeats the
            // batch.size / linger.ms tuning this class demonstrates. The callback
            // fires once the broker acknowledges the record, preserving the
            // per-record success message.
            for (int i = 0; i < 99999; i++) {
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("BD1", "BD1_1" + i);
                final int seq = i + 1; // captured by the callback lambda
                producer.send(record, (RecordMetadata metadata, Exception e) -> {
                    if (e == null) {
                        System.out.println("第" + seq + "条数据发送成功");
                    } else {
                        System.err.println("第" + seq + "条数据发送失败: " + e);
                    }
                });
            }
        }
    }

}
