package com.roy.sparkDemos.streaming;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import static org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe;

public class JavaKafkaWordCount {

    /**
     * Spark Streaming word count over Kafka using the 0-10 direct-stream API.
     *
     * <p>Example submission:
     * ./spark-submit --master yarn --class "com.roy.sparkDemos.streaming.JavaKafkaWordCount" /home/voltdb/sparkapp/sparkDemos.jar "voltdb_test01:2181,voltdb_test02:2181,voltdb_test03:2181" "my-comsumer-group" "sp_test" 2
     *
     * @param args [0] Kafka bootstrap servers (named zkQuorum for historical reasons —
     *             the 0-8 receiver API took a ZooKeeper quorum; the 0-10 direct API
     *             connects to the brokers directly),
     *             [1] consumer group id,
     *             [2] comma-separated topic list,
     *             [3] numThreads (parsed for argument validation only; the 0-10
     *             direct API has no per-topic receiver thread count)
     * @throws InterruptedException if awaitTermination is interrupted
     */
    public static void main(String[] args) throws InterruptedException {
        if (null == args || args.length != 4) {
            System.out.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
            System.exit(1);
        }
        String zkQuorumParam = args[0];
        String kafkaGroupParam = args[1];
        String allTopics = args[2];
        // Kept only to validate the 4th argument; unused by the 0-10 direct API.
        int numThreads = Integer.parseInt(args[3]);

        SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
        // Only default to a local master when none was supplied (e.g. when run from
        // an IDE). Hard-coding setMaster unconditionally would silently override the
        // --master given on spark-submit (see the yarn example above).
        if (!sparkConf.contains("spark.master")) {
            sparkConf.setMaster("local[4]");
        }
        // 200 ms micro-batch interval.
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(200));

        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", zkQuorumParam);
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", kafkaGroupParam);
        kafkaParams.put("auto.offset.reset", "latest");
        kafkaParams.put("enable.auto.commit", false);
        Collection<String> topics = Arrays.asList(allTopics.split(","));

        // Direct stream of ConsumerRecords: record.key() is the Kafka message key,
        // record.value() is the message payload.
        JavaInputDStream<ConsumerRecord<String, String>> dStream = KafkaUtils.createDirectStream(jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(topics, kafkaParams));
        // We only need the message payload.
        JavaDStream<String> lines = dStream.map(record -> record.value());
        // Split each line on whitespace into words.
        JavaDStream<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());

        JavaPairDStream<String, Integer> wordCounts = words.mapToPair(word -> new Tuple2<>(word, 1)).reduceByKey(Integer::sum);

        wordCounts.print();

        jssc.start();
        jssc.awaitTermination();
    }

}
