package cn.lsh.spark.streaming;

import org.apache.log4j.lf5.LogLevel;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.Function0;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

/**
 * Spark standalone or Mesos cluster mode runs a single Driver.
 * When submitting the application with the {@code --supervise} option, the Driver is
 * automatically restarted if it dies. By checkpointing the StreamingContext to a
 * reliable filesystem (e.g. HDFS), the restarted Driver can recover the context
 * (configuration, DStream lineage, pending batches, offsets) from the checkpoint.
 */
public class SparkStreamOnHDFS {

	/** Checkpoint directory for StreamingContext recovery (would be an hdfs:// path in production). */
	private static final String CONTEXT_CHECK_POINT_DIR = "file:/checkpoint/context";

	/**
	 * Entry point: recovers (or creates) a JavaStreamingContext and runs it until termination.
	 *
	 * @param args unused command-line arguments
	 * @throws InterruptedException if the awaiting thread is interrupted
	 */
	public static void main(String[] args) throws InterruptedException {
		SparkConf conf = new SparkConf().setMaster("local").setAppName("SparkStreamOnHDFS");

		// getOrCreate: if the checkpoint directory holds data, the StreamingContext is
		// rebuilt from it; otherwise the Function0 factory below creates a fresh one.
		// Spark's Function0 is a functional interface, so a lambda replaces the
		// anonymous class (it is serializable through the interface).
		JavaStreamingContext jsc =
				JavaStreamingContext.getOrCreate(CONTEXT_CHECK_POINT_DIR, () -> createContext(conf));
		jsc.start();
		jsc.awaitTermination();
	}

	/**
	 * Builds a new JavaStreamingContext with a 5-second batch interval, checkpointing
	 * enabled, and a simple count-and-print pipeline over a text-file stream.
	 * Only invoked when no usable checkpoint exists.
	 *
	 * @param conf the Spark configuration to build the context from
	 * @return the fully wired streaming context
	 */
	public static JavaStreamingContext createContext(SparkConf conf) {
		System.out.println("create new context");
		JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(5));
		// setLogLevel accepts a plain level name; this avoids depending on the
		// legacy org.apache.log4j.lf5 package (LogLevel.WARN.getLabel() == "WARN").
		jsc.sparkContext().setLogLevel("WARN");
		/*
		 * Persist the context to the checkpoint directory. The checkpoint stores:
		 *   1. configuration
		 *   2. DStream operation lineage
		 *   3. job progress (incomplete batches)
		 *   4. offsets
		 */
		jsc.checkpoint(CONTEXT_CHECK_POINT_DIR);
		// Monitor the directory for new text files and print the per-batch record count.
		JavaDStream<String> lines = jsc.textFileStream("file:/data");
		JavaDStream<Long> count = lines.count();
		count.print();
		return jsc;
	}

}
