package org.shj.spark.streaming;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.flume.FlumeUtils;
import org.apache.spark.streaming.flume.SparkFlumeEvent;

import scala.Tuple2;

/**
 * Word count over data that Flume pushes to Spark Streaming. The Spark program
 * must be started first, then Flume (Flume's avro sink connects to the receiver
 * this job opens).
 *
 * This push-based approach has two known drawbacks:
 * 1. When the data volume is too high, Spark may not be able to keep up.
 * 2. When there is no data, Spark sits idle (empty batches are still scheduled).
 */
public class FlumeWordCount {

	public static void main(String[] args) throws Exception{
		SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("FlumeWordCount");
		
		// The second argument is the batch interval: how long to collect incoming
		// data before turning it into one RDD for processing.
		// If no data arrives within an interval, an empty job is still launched,
		// which wastes scheduler resources. In a real application, check for data
		// before submitting the job.
		JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(20));
		jsc.sparkContext().setLogLevel("WARN");
		
		/**
		 * The host and port here are the ones Spark's Avro RPC receiver binds to.
		 * In Flume's configuration, the sink type is avro and its hostname/port
		 * must match these values.
		 */
		JavaReceiverInputDStream<SparkFlumeEvent> inputStream = 
				FlumeUtils.createStream(jsc, "192.168.31.227", 9999);
		
		// Split each Flume event body into whitespace-separated words.
		JavaDStream<String> words = inputStream.flatMap(new FlatMapFunction<SparkFlumeEvent, String>() {
			private static final long serialVersionUID = 1L;

			@Override
			public Iterator<String> call(SparkFlumeEvent event) throws Exception {
				// getBody() returns a ByteBuffer. Calling array() on it would expose
				// the entire backing array — including bytes outside position..limit —
				// so copy only the remaining bytes (via a duplicate, to leave the
				// buffer's position untouched) and decode them explicitly as UTF-8
				// rather than the platform default charset.
				ByteBuffer body = event.event().getBody();
				byte[] bytes = new byte[body.remaining()];
				body.duplicate().get(bytes);
				String line = new String(bytes, StandardCharsets.UTF_8);
				return Arrays.asList(line.split(" ")).iterator();
			}
		});
		
		// Map each word to a (word, 1) pair for counting.
		JavaPairDStream<String, Integer> pair = words.mapToPair(new PairFunction<String, String, Integer>() {
			private static final long serialVersionUID = 2823007325722993181L;

			@Override
			public Tuple2<String, Integer> call(String word) throws Exception {
				return new Tuple2<String, Integer>(word, 1);
			}
		});
		
		// Sum the counts per word within each batch.
		JavaPairDStream<String, Integer> wordCount = pair.reduceByKey(new Function2<Integer, Integer, Integer>() {
			private static final long serialVersionUID = 8560445270191804880L;

			@Override
			public Integer call(Integer v1, Integer v2) throws Exception {
				return v1 + v2;
			}
		});
		
		// Print the first elements of each batch's result to stdout.
		wordCount.print();
		
		jsc.start();
		jsc.awaitTermination();
		jsc.close();

	}

}
