package com.spark.zhou.demo.streaming;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.Optional;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.*;

/**
 * @Description: kafka直连测试，统计输入的每个单词出现的个数
 * @Author: ZhOu
 * @Date: 2018/11/6
 */
/**
 * Kafka direct-stream demo: counts occurrences of each whitespace-separated word
 * read from the subscribed topics.
 *
 * <p>Prints both a 20-second windowed count and a running (stateful) total.
 * Requires a reachable Kafka cluster at the configured bootstrap servers.
 */
public class DirectStreamDemo {
    public static void main(String[] args) throws InterruptedException {
        SparkConf sparkConf = new SparkConf()
                .setAppName("directStream")
                .setMaster("local[2]")
                .set("spark.default.parallelism", "100");
        // 10-second micro-batches.
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(10));
        // Checkpointing is mandatory for stateful ops (updateStateByKey) and must be
        // configured before the context is started; set it up front for clarity.
        jssc.checkpoint("check_point");

        Map<String, Object> kafkaParams = new HashMap<>(8);
        kafkaParams.put("bootstrap.servers", "node1:9092,node2:9092,node3:9092");
        kafkaParams.put("group.id", "test");
        // Offsets are managed by Spark, not auto-committed by the Kafka consumer.
        // (No auto.commit.interval.ms: it is meaningless with auto-commit disabled.)
        kafkaParams.put("enable.auto.commit", false);
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("auto.offset.reset", "latest");

        Collection<String> topics = Arrays.asList("test1", "topic-test");

        JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaUtils.createDirectStream(jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(topics, kafkaParams));

        // Split each record's value into words, emitting (word, 1) pairs.
        JavaPairDStream<String, Integer> pairDStream = stream.flatMapToPair(record -> {
            String[] words = record.value().split(" ");
            List<Tuple2<String, Integer>> pairs = new ArrayList<>(words.length);
            for (String word : words) {
                pairs.add(new Tuple2<>(word, 1));
            }
            return pairs.iterator();
        });

        // Word counts over the last 20 seconds (sliding with the 10s batch interval).
        pairDStream.reduceByKeyAndWindow(Integer::sum, Durations.seconds(20)).print();

        // Running total per word across all batches, carried in checkpointed state.
        pairDStream.updateStateByKey((List<Integer> values, Optional<Integer> state) -> {
            int total = state.orElse(0);
            for (Integer v : values) {
                total += v;
            }
            return Optional.of(total);
        }).print();

        jssc.start();
        jssc.awaitTermination();
    }
}
