package SparkStreaming;

import kafka.serializer.StringDecoder;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Real-time word count over a Kafka topic using the direct (receiver-less)
 * Kafka integration: Spark tracks offsets itself and can read multiple
 * topic partitions in parallel, without a separate receiver or WAL.
 */
public class JavaKafkaDirectWordCount {

    public static void main(String[] args) {
        // local[2]: at least two cores so receiving and processing can overlap;
        // 5-second micro-batches.
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("JavaKafkaDirectWordCount");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

        // Kafka connection parameters. The direct API talks to the brokers
        // directly (no ZooKeeper-based consumer group), so only the broker
        // list is required here.
        Map<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("metadata.broker.list",
                "192.168.11.31:9092,192.168.11.32:9092,192.168.11.33:9092");

        // Topics to subscribe to; the direct stream reads all of them in parallel.
        Set<String> topics = new HashSet<>();
        topics.add("WordCount");

        // Input DStream of (key, message) pairs, both decoded as Strings.
        JavaPairInputDStream<String, String> lines = KafkaUtils.createDirectStream(
                jssc,
                String.class,
                String.class,
                StringDecoder.class,
                StringDecoder.class,
                kafkaParams,
                topics);

        // Split each message value (tuple._2) into words.
        // BUG FIX: previously returned null, which would throw a
        // NullPointerException as soon as Spark iterated the result.
        JavaDStream<String> words = lines.flatMap(
                (FlatMapFunction<Tuple2<String, String>, String>) tuple ->
                        Arrays.asList(tuple._2.split(" "))
        );

        // Map each word to a (word, 1) pair ...
        JavaPairDStream<String, Integer> pairs = words.mapToPair(
                (PairFunction<String, String, Integer>) word -> new Tuple2<>(word, 1)
        );

        // ... and sum the counts per word within each batch.
        JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
                (Function2<Integer, Integer, Integer>) Integer::sum
        );

        wordCounts.print();

        jssc.start();
        try {
            // BUG FIX: without awaitTermination() the context was closed
            // immediately after start(), so no batch ever ran. Block here
            // until the streaming computation is stopped or fails.
            jssc.awaitTermination();
        } catch (Exception e) {
            // Restore the interrupt flag if we were interrupted while waiting.
            Thread.currentThread().interrupt();
        } finally {
            jssc.close();
        }
    }
    // Before running: create a Kafka topic named "WordCount" on the spark1
    // cluster, then execute this main method.

}
