package org.apache.spark.examples.streaming;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.examples.utils.StreamingExamples;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

/**
 * Created by admin on 2019/3/29.
 */
public class JavaQueueStream {
    /**
     * Entry point: builds 30 identical RDDs (each holding 0..999), pushes them
     * into a queue-backed input DStream with a 1-second batch interval, and for
     * each batch prints (value % 10, 1) pairs and their per-key sums.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // Windows-only workaround so Hadoop can locate winutils.exe; harmless elsewhere.
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.6.0");
        StreamingExamples.setStreamingLogLevels();
        SparkConf sparkConf = new SparkConf().setAppName("JavaQueueStream");
        // Create the streaming context with a 1-second batch duration.
        JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));

        // Create the queue of RDDs that feeds the QueueInputDStream:
        // each RDD contains the integers 0..999.
        List<Integer> list = new ArrayList<>(1000);
        for (int i = 0; i < 1000; i++) {
            list.add(i);
        }
        Queue<JavaRDD<Integer>> rddQueue = new LinkedList<>();
        for (int i = 0; i < 30; i++) {
            rddQueue.add(ssc.sparkContext().parallelize(list));
        }

        // Create the QueueInputDStream and do some processing over it:
        // bucket each value by (value % 10) and count occurrences per key.
        JavaDStream<Integer> inputStreams = ssc.queueStream(rddQueue);
        JavaPairDStream<Integer, Integer> mappedStream =
                inputStreams.mapToPair(i -> new Tuple2<>(i % 10, 1));
        mappedStream.print();
        JavaPairDStream<Integer, Integer> reducedStream =
                mappedStream.reduceByKey(Integer::sum);
        reducedStream.print();

        ssc.start();
        try {
            ssc.awaitTermination();
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing the interruption,
            // so any enclosing code can still observe it.
            Thread.currentThread().interrupt();
        } finally {
            // Ensure the streaming context is shut down on exit.
            ssc.stop();
        }
    }
}
