package com.event.ingestion.kafka;


import com.event.ingestion.IngestionExecutor;
import com.event.ingestion.config.LoadConfig;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

/**
 * @author ZzHh
 * @Classname EventProducer
 * @Description TODO
 * @Date 2020/6/12 10:35
 * @Create by IntelliJ IDEA
 **/

/**
 * Producer business logic: streams each line of a data file to a Kafka topic.
 * Usage: java -cp &lt;jar&gt; com.event.ingestion.kafka.EventProducer settings.xml users users.csv
 */
public class EventProducer implements IngestionExecutor {

    /**
     * Reads a text file line by line and publishes each line as a message to a
     * Kafka topic.
     *
     * <p>Expected arguments:
     * <ol>
     *   <li>{@code args[0]} — path to the settings file holding the broker URL</li>
     *   <li>{@code args[1]} — target topic name</li>
     *   <li>{@code args[2]} — path of the file to upload (used as given;
     *       NOTE(review): the previous code lower-cased this path, which broke
     *       mixed-case file names on case-sensitive file systems)</li>
     * </ol>
     *
     * <p>The message key is the running byte offset of the line within the file
     * (line length plus one for the newline), serialized as a decimal string.
     *
     * @param args command-line arguments as described above
     * @throws Exception if the settings file cannot be loaded, the input file
     *         cannot be read, or the producer fails to close cleanly
     */
    @Override
    public void executor(String[] args) throws Exception {
        if (args.length != 3) {
            System.out.println("参数不匹配!");
            return;
        }

        // Broker URL comes from the settings file named by the first argument.
        String brokerUrl = LoadConfig.loadSettings(args[0]).getProperty(LoadConfig.kafkaBrokerUrl);
        String topic = args[1];
        // Bug fix: keep the path exactly as supplied (was args[2].toLowerCase()).
        String fileName = args[2];

        Properties properties = buildProducerConfig(brokerUrl);

        // try-with-resources closes the reader first, then flushes and closes
        // the producer (Producer.close() flushes buffered records).
        try (Producer<String, String> producer = new KafkaProducer<String, String>(properties);
             BufferedReader br = new BufferedReader(
                     new InputStreamReader(new FileInputStream(fileName), StandardCharsets.UTF_8))) {
            long key = 0L;   // running byte offset within the file
            long count = 0L; // total number of messages sent
            String line = br.readLine();
            while (line != null) {
                // Advance the offset by the line length plus the newline.
                // e.g. key -> value[3197468391,id_ID,1993,male,2012-]
                key += line.length() + 1;
                producer.send(new ProducerRecord<String, String>(topic, Long.toString(key), line));
                count++;
                line = br.readLine();
            }
            System.out.println("消息总数:" + count);
        }
        // Errors now propagate to the caller (the method already declares
        // `throws Exception`); the old code swallowed them via printStackTrace().
    }

    /**
     * Builds the Kafka producer configuration for the given broker URL.
     *
     * @param brokerUrl bootstrap server address read from the settings file
     * @return a {@link Properties} object ready to construct a {@link KafkaProducer}
     */
    private static Properties buildProducerConfig(String brokerUrl) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", brokerUrl);
        // acks=1: the leader must acknowledge each record.
        properties.put("acks", "1");
        // retries=0: failed sends are not retried by the client.
        properties.put("retries", 0);
        // Batch records destined for the same partition to cut request round-trips.
        properties.put("batch.size", 16384);
        // linger.ms trades a small delay for fuller batches: if a batch normally
        // fills in ~20ms, a linger slightly above that guarantees the batch is
        // still sent promptly during low-traffic periods; a value near 0 would
        // ship mostly-empty batches and defeat batch.size.
        properties.put("linger.ms", 10);
        // Total bytes the producer may buffer while waiting to send.
        properties.put("buffer.memory", 40000000);
        // Both key and value are plain strings.
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return properties;
    }

}