package cn.cmss.collectlog;



import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import kafka.serializer.StringEncoder;


/**
 * Purpose: watches log lines from a target file and sends them to Kafka
 * through the producer API, under topic "JsonData".
 *
 * @Author: Super Yang
 * @Date: 2019-02-19 14:40
 * @Description:
 */


public class TestProducer {
    /**
     * Reads a local log file line by line and publishes each line as a message
     * to the Kafka topic "JsonData", using the legacy (pre-0.9) producer API.
     *
     * @param args unused
     * @throws InterruptedException if the pacing sleep between sends is interrupted
     */
    public static void main( String[] args ) throws InterruptedException {
        // Collect the producer configuration into a Properties object.
        Properties properties = new Properties ();
        // Brokers used to fetch cluster metadata; not every broker in the
        // cluster is required, but at least two are recommended for fault
        // tolerance (a single one is fine for testing).
        properties.setProperty ( "metadata.broker.list","hadoop04:9092" );
        // Serialization applied to message payloads before they reach the broker.
        properties.setProperty ( "serializer.class",StringEncoder.class.getName () );
        // ZooKeeper ensemble (not strictly required by the legacy producer;
        // kept for parity with the original configuration).
        properties.setProperty ( "zookeeper.connect","hadoop04:2181" );

        // acks=1: wait for the partition leader to acknowledge each message.
        properties.setProperty("request.required.acks","1");

        ProducerConfig producerConfig = new ProducerConfig ( properties);
        Producer<String, String> producer = new Producer<String, String> ( producerConfig);

        // Read the log file and forward every line to Kafka.
        // try-with-resources guarantees the reader is closed even when reading
        // or sending fails (the original leaked both the reader and the
        // producer on any exception); the producer is closed in finally.
        // The charset is pinned to UTF-8 instead of the platform default.
        try (BufferedReader bf = new BufferedReader(
                new InputStreamReader(
                        new FileInputStream(
                                "D:\\app\\ideaWorkSpace\\Spark\\cmcc.json"),
                        StandardCharsets.UTF_8))) {
            String line;
            while((line=bf.readLine())!=null){
                System.out.println("line"+line );
                KeyedMessage<String,String> keyedMessage = new KeyedMessage<String,String>("JsonData",line);
                // Throttle to ~2 messages/second so a demo consumer can keep up.
                Thread.sleep(500);
                producer.send(keyedMessage);
            }
            System.out.println("已经发送完毕");

        } catch (IOException e) {
            // Catch only I/O failures; InterruptedException now propagates via
            // the declared throws clause instead of being swallowed by the
            // original broad catch (Exception).
            e.printStackTrace();
        } finally {
            producer.close();
        }
    }
}
