package org.example.com.atguigu.day06;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Seconds;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Int;
import scala.Tuple2;

import java.util.*;

/**
 * Spark Streaming word count over Kafka.
 *
 * <p>Every 5 seconds, pulls new records from the Kafka topic {@code spark_topic},
 * splits each record value on spaces, counts words within the batch, and prints
 * the per-batch counts to stdout. Intended as a local demo ({@code local[4]}).
 */
public class WorldCount {
    public static void main(String[] args) throws InterruptedException {

        // Streaming context with a 5-second micro-batch interval.
        JavaStreamingContext ssc = new JavaStreamingContext(
                new SparkConf().setMaster("local[4]").setAppName("test"),
                Seconds.apply(5));

        // Topic(s) to read from.
        List<String> topics = Collections.singletonList("spark_topic");

        // Kafka consumer configuration.
        Map<String, Object> params = new HashMap<>();
        // Deserializer for record keys.
        params.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Deserializer for record values.
        params.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Kafka cluster bootstrap addresses.
        params.put("bootstrap.servers", "hadoop102:9092,hadoop103:9092,hadoop104:9092");
        // Consumer group id.
        params.put("group.id", "spark001");
        // Where a brand-new consumer group starts reading (earliest available offset).
        params.put("auto.offset.reset", "earliest");
        // Auto-commit offsets after each poll.
        // NOTE(review): with a direct stream this gives at-most-once semantics —
        // offsets may be committed before the batch is processed. Confirm this is
        // acceptable, or commit manually via commitAsync after processing.
        params.put("enable.auto.commit", "true");

        JavaInputDStream<ConsumerRecord<String, String>> directStream1 =
                KafkaUtils.createDirectStream(
                        ssc,
                        LocationStrategies.PreferConsistent(),
                        ConsumerStrategies.<String, String>Subscribe(topics, params));

        // For each micro-batch: extract values, split into words, count, and print.
        directStream1.foreachRDD((JavaRDD<ConsumerRecord<String, String>> rdd) -> {
            // Record value only (the line of text); keys are ignored.
            JavaRDD<String> lines = rdd.map(ConsumerRecord::value);

            // ["hello java", "hello spark hadoop", ...] -> (word, 1) pairs
            JavaPairRDD<String, Integer> ones = lines.flatMapToPair(line -> {
                List<Tuple2<String, Integer>> pairs = new ArrayList<>();
                for (String word : line.split(" ")) {
                    pairs.add(new Tuple2<>(word, 1));
                }
                return pairs.iterator();
            });

            // [hello->1, hadoop->1, hello->1, ...] -> per-word totals for this batch
            JavaPairRDD<String, Integer> counts = ones.reduceByKey(Integer::sum);

            // collect() is safe here only because batches are small (demo scale).
            System.out.println(counts.collect());
        });

        // Start the streaming computation.
        ssc.start();

        // Block the main thread until the streaming job is stopped.
        ssc.awaitTermination();

    }
}
