// NOTE(review): this entire file is commented-out dead code — nothing in it is
// compiled into the build. If the class is no longer needed, prefer deleting the
// file outright (version control preserves the history) over keeping it commented.
//
// Contents, for the record:
//   * An abandoned receiver-based word-count draft (the doubly-commented inner
//     main below), left mid-edit — note the dangling "KafkaUtils." expression,
//     which would not compile if uncommented.
//   * A complete direct-stream word count (the outer main) built on the old
//     Kafka 0.8 integration (createDirectStream + StringDecoder). That API was
//     deprecated and later removed from Spark; reviving this code would require
//     porting to the kafka-0-10 integration or Structured Streaming.
//
//package com.tod.spark.springbootspark.streaming;
//
//import kafka.serializer.StringDecoder;
//import org.apache.spark.SparkConf;
//import org.apache.spark.streaming.Durations;
//import org.apache.spark.streaming.api.java.JavaDStream;
//import org.apache.spark.streaming.api.java.JavaPairDStream;
//import org.apache.spark.streaming.api.java.JavaPairInputDStream;
//import org.apache.spark.streaming.api.java.JavaStreamingContext;
//import org.apache.spark.streaming.kafka.KafkaUtils;
//import scala.Tuple2;
//
//import java.util.*;
//import java.util.regex.Pattern;
//
//public class KafkaReceiverWordCount {
////    public static void main(String[] args){
////        SparkConf conf = new SparkConf().setAppName("workcount").setMaster("local[2]");
////        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));
////
////        // Important: this map sets how many threads pull data for each topic
////        HashMap<String, Integer> toplicTheadMap = new HashMap<>();
////        toplicTheadMap.put("201803021247",1);
////
////        // Streams created from Kafka come as pairs with two values, but the first value is usually null
//////        JavaPairReceiverInputDStream<String, String> lines = KafkaUtils.createStream(jssc,
//////                "192.168.2.237:2181,192.168.2.205:2181,192.168.2.243:2181",
//////                "WordcountCousumerGroup",
//////                toplicTheadMap
//////        );
////
////        // NOTE(review): abandoned mid-edit — the statement below is incomplete
////        // (dangling method-selection expression) and would not compile if uncommented.
////        KafkaUtils.
////
////        JavaDStream<String> words = lines.flatMap((FlatMapFunction<Tuple2<String, String>, String>) tuple -> {
////            List<String> strings = Arrays.asList(tuple._2.split(" "));
////            return strings.iterator();
////        });
////
////        JavaPairDStream<String, Integer> pairs = words.mapToPair((PairFunction<String, String, Integer>) word -> new Tuple2<String, Integer>(word, 1));
////
////        JavaPairDStream<String, Integer> wordcounts = pairs.reduceByKey((Function2<Integer, Integer, Integer>) (v1, v2) -> v1 + v2);
////
////        wordcounts.print();
////
////        try {
////            jssc.start();
////            jssc.awaitTermination();
////        } catch (InterruptedException e) {
////            e.printStackTrace();
////        }finally {
////            jssc.close();
////        }
////
////    }
//
//    private static final Pattern SPACE = Pattern.compile(" ");
//
//    public static void main(String[] args) throws InterruptedException {
//        // Create context with a 2 seconds batch interval
//        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("JavaDirectKafkaWordCount");
//        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));
//
//        Set<String> topicsSet = new HashSet<>(Arrays.asList("201803021247"));
//        Map<String, String> kafkaParams = new HashMap<>();
//        kafkaParams.put("metadata.broker.list", "192.168.2.237:9092,192.168.2.205:9092,192.168.2.243:9092");
//
//        // Create direct kafka stream with brokers and topics
//        JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(
//                jssc,
//                String.class,
//                String.class,
//                StringDecoder.class,
//                StringDecoder.class,
//                kafkaParams,
//                topicsSet
//        );
//
//        // Get the lines, split them into words, count the words and print
//        JavaDStream<String> lines = messages.map(Tuple2::_2);
//        JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());
//        JavaPairDStream<String, Integer> wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
//                .reduceByKey((i1, i2) -> i1 + i2);
//        wordCounts.print();
//
//        // Start the computation
//        jssc.start();
//        jssc.awaitTermination();
//
//    }
//}
