package com.rock.code.province.tj.busi.demo;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

/**
 * Kafka producer demo: publishes a single string record to a topic.
 *
 * @author lvfang
 */
public class SendDataToKafka {

    /**
     * Sends {@code data} to {@code topic} using the caller-supplied record key.
     *
     * <p>Blocks until the producer has been flushed and closed, so the record
     * is handed to the broker (or an error surfaced) before this returns.
     *
     * @param topic the Kafka topic to publish to
     * @param key   the record key (drives partition assignment)
     * @param data  the record value
     */
    public void send(String topic, String key, String data) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.43.74:9092");
        // "all": the write is acknowledged only after the full in-sync replica
        // set has it, i.e. the record is "committed".
        props.put("acks", "all");
        // Retry transient failures; the previous value of 0 contradicted
        // acks=all by dropping records on any network hiccup. (Idempotence is
        // not enabled, so retries can reorder records within a partition.)
        props.put("retries", 3);
        // Batch up to 16 KB of records per request to cut round trips; a full
        // batch is sent immediately regardless of linger.ms below.
        props.put("batch.size", 16384);
        // Wait up to 1 ms for more records before sending a partial batch,
        // trading a tiny delay for better batching.
        props.put("linger.ms", 1);
        // Total memory the producer may use to buffer records awaiting send.
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // KafkaProducer is AutoCloseable: try-with-resources guarantees the
        // producer is flushed and closed even if send() throws, where the
        // original leaked it on any exception before close().
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // BUGFIX: the original ignored the 'key' parameter and sent the
            // loop counter ("1") as the record key instead; the dead
            // single-iteration loop and 100 ms sleep are removed as well.
            producer.send(new ProducerRecord<>(topic, key, data));
        }
    }

    public static void main(String[] args) {
        SendDataToKafka sendDataToKafka = new SendDataToKafka();
        sendDataToKafka.send("test", "", "this is a test data too");
    }

}
