import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.*;

import static org.apache.spark.streaming.Durations.seconds;

/**
 * Demo driver: consumes one or more Kafka topics with the Spark Streaming
 * kafka-0-10 direct stream, prints each record, and runs a per-batch word count.
 *
 * <p>Blocks in {@link JavaStreamingContext#awaitTermination()} until the
 * streaming context is stopped externally.
 */
public class StreamMain {
    public static void main(String[] args) throws InterruptedException {
        // Initialize the Spark configuration.
        SparkConf sparkConf = new SparkConf();
        sparkConf.setAppName("SparkStream连接kafka的demo");

        // Local-development configuration when launched on a Windows machine.
        String osName = System.getProperty("os.name");
        if (osName != null && osName.toLowerCase(Locale.ROOT).startsWith("win")) {
            // Fix: the Hadoop impersonation property is HADOOP_USER_NAME
            // (the original "HADOOPUSER_USER_NAME" was a typo and had no effect).
            System.setProperty("HADOOP_USER_NAME", "hdfs");
            sparkConf.setMaster("local[1]");
        }

        // These could be read from the command line / external config instead
        // of being hard-coded (see original note about a singleton config holder).
        String topics = "input_topic";
        String kafkagroup = "input_group";
        String kafkabrokers = "bfd-lugu-dev107:6667";

        // Comma-separated topic list -> set of topic names.
        Collection<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));

        // Kafka consumer configuration — required; the direct stream fails without it.
        // Note: "metadata.broker.list" (legacy) and "key.serializer" (a producer
        // setting) were removed — neither belongs in a 0.10 consumer config.
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", kafkabrokers);
        kafkaParams.put("group.id", kafkagroup);
        kafkaParams.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Offsets are managed by Spark's direct stream; do not auto-commit.
        kafkaParams.put("enable.auto.commit", false);

        // One micro-batch every 20 seconds.
        Duration secs = seconds(20);
        JavaStreamingContext streamingContext = new JavaStreamingContext(sparkConf, secs);

        // Both deserializers are StringDeserializer, so the record type is
        // ConsumerRecord<String, String> (the original used <Object, Object>).
        JavaInputDStream<ConsumerRecord<String, String>> lines = KafkaUtils.createDirectStream(
                streamingContext,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topicsSet, kafkaParams)
        );

        // Debug output: print every record of every batch on the executors.
        lines.foreachRDD(rdd -> rdd.foreach(record -> System.out.println(record)));

        // Simple per-batch word count (scoped to a single batch, not stateful).
        JavaPairDStream<String, Integer> counts = lines
                .flatMap(record -> Arrays.asList(record.value().split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey(Integer::sum);
        counts.print();

        streamingContext.start();
        streamingContext.awaitTermination();
        streamingContext.close();
    }
}
