package spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

/**
 * Spark Streaming word count over an HDFS directory.
 *
 * <p>Watches {@code dataDirectory} for newly arriving text files, splits each
 * line on single spaces, counts word occurrences per 15-second batch, and
 * prints the counts to stdout. Checkpoint data is written to
 * {@code checkpointDirectory}.
 */
public class SparkStreamMain {
	public static void main(String[] args) {
		// HDFS directory where Spark Streaming persists checkpoint data.
		final String checkpointDirectory = "hdfs://Master:9000/library/SparkStreaming/CheckPoint_Data";
		// HDFS directory monitored for newly created input files.
		final String dataDirectory = "hdfs://Master:9000/library/SparkStreaming/Data";
		// Master URL and application name for the Spark cluster.
		final SparkConf conf = new SparkConf().setMaster("spark://Master:7077").setAppName("SparkStreamingOnHDFS");

		// Build the streaming context from the checkpoint directory and configuration.
		// NOTE(review): for fault-tolerant recovery from the checkpoint, callers would
		// normally use JavaStreamingContext.getOrCreate(checkpointDirectory, factory);
		// this version always creates a fresh context — confirm that is intended.
		JavaStreamingContext jsc = createContext(checkpointDirectory, conf);

		// FIX: was a raw JavaDStream; textFileStream returns JavaDStream<String>.
		JavaDStream<String> lines = jsc.textFileStream(dataDirectory);

		// 4.2.1 Split every line on single spaces to obtain a DStream of words.
		JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
			@Override
			public Iterator<String> call(String line) throws Exception {
				return Arrays.asList(line.split(" ")).iterator();
			}
		});

		// 4.2.2 Map each word to a (word, 1) pair for counting.
		JavaPairDStream<String, Integer> pairs = words.mapToPair(new PairFunction<String, String, Integer>() {
			@Override
			public Tuple2<String, Integer> call(String word) throws Exception {
				return new Tuple2<String, Integer>(word, 1);
			}
		});

		// 4.2.3 Sum the counts per key (combines locally before the shuffle and
		// again on the reducer side).
		JavaPairDStream<String, Integer> wordsCount = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
			@Override
			public Integer call(Integer v1, Integer v2) throws Exception {
				return v1 + v2;
			}
		});

		// Print the first elements of each batch's counts to stdout.
		wordsCount.print();

		/*
		 * Starting the context launches the Spark Streaming execution engine (the
		 * Driver) on a new thread; it runs an internal message loop that receives
		 * messages from the application itself and from the Executors.
		 */
		jsc.start();
		try {
			jsc.awaitTermination();
		} catch (InterruptedException e) {
			// FIX: restore the interrupt flag instead of swallowing the interrupt.
			Thread.currentThread().interrupt();
		} finally {
			// FIX: close the context even if awaitTermination throws.
			jsc.close();
		}
	}

	/**
	 * 3.1 Creates a new {@link JavaStreamingContext} with a 15-second batch
	 * interval and enables checkpointing.
	 *
	 * @param checkpointDirectory directory where checkpoint data is stored
	 * @param conf                Spark configuration to build the context from
	 * @return a new streaming context with checkpointing enabled
	 */
	private static JavaStreamingContext createContext(String checkpointDirectory, SparkConf conf) {
		System.out.println("Creating new context");
		// Create the context with a 15 second batch size.
		JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(15));
		ssc.checkpoint(checkpointDirectory);
		return ssc;
	}
}
