package SparkStreaming;

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.KafkaUtils$;

import java.util.HashMap;
import java.util.Map;

/**
 * Real-time word-count program backed by Kafka (Spark Streaming skeleton).
 */
public class JavaKafkaWordCount {

    public static void main(String[] args) {
        // local[2]: one core for the Kafka receiver, one for processing the batches.
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("JavaSparkStreaming");
        // 5-second micro-batch interval.
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

        // Topic name -> number of receiver threads for that topic.
        Map<String, Integer> topicThreadMap = new HashMap<String, Integer>();
        topicThreadMap.put("WordCount", 1);

        // Create an input DStream against Kafka.
        // NOTE(review): KafkaUtils.createStream (the ZooKeeper-based receiver, fixed
        // typo from "cretaStream") belongs to the spark-streaming-kafka-0-8 connector,
        // but this file imports org.apache.spark.streaming.kafka010, whose entry point
        // is createDirectStream(LocationStrategy, ConsumerStrategy). Confirm which
        // connector dependency is on the classpath before re-enabling this block.
        // (The unused import of KafkaUtils$ — the Scala companion object — can be
        // dropped at the same time.)
/*        JavaPairReceiverInputDStream<String,String> lines = KafkaUtils.createStream(
                jssc,
                "192.168.11.31:2181,192.168.11.32:2181,192.168.11.33:2181",
                "DefaultConsumerGroup",
                topicThreadMap
        );*/

        // BUG FIX: calling jssc.start() while no output operation is registered (the
        // stream wiring above is commented out) makes Spark throw
        // "IllegalStateException: ... No output operations registered, so nothing to
        // execute". Keep the lifecycle calls disabled until the DStream pipeline is
        // re-enabled; start() must then be followed by awaitTermination() so the
        // driver blocks while batches run, with close() releasing resources last.
        // jssc.start();
        // jssc.awaitTermination();
        jssc.close();
    }

}
