package com._58city.spark.app;

import java.util.Properties;

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public abstract class BaseStreaming {

	/**
	 * Builds the Kafka/ZooKeeper consumer configuration shared by streaming jobs.
	 *
	 * @param zk_hosts      ZooKeeper host list
	 * @param zk_port       ZooKeeper client port
	 * @param broker_path   ZooKeeper path holding broker metadata
	 * @param kafka_topic   topic to consume; also used to derive the consumer group id
	 * @param consumer_conn ZooKeeper connection string used by the consumer
	 * @param consumer_path ZooKeeper path where consumer offsets are stored
	 * @return a {@link Properties} populated with the consumer settings
	 */
	protected Properties init_kafka_props(String zk_hosts, String zk_port,
			String broker_path, String kafka_topic, String consumer_conn, String consumer_path) {
		Properties kafkaProps = new Properties();
		// Required connection settings.
		kafkaProps.put("zookeeper.hosts", zk_hosts);
		kafkaProps.put("zookeeper.port", zk_port);
		kafkaProps.put("zookeeper.broker.path", broker_path);
		kafkaProps.put("kafka.topic", kafka_topic);
		// Consumer group id is derived from the topic name.
		kafkaProps.put("kafka.consumer.id", kafka_topic + "_group");
		kafkaProps.put("zookeeper.consumer.connection", consumer_conn);
		kafkaProps.put("zookeeper.consumer.path", consumer_path);
		// Optional tuning knobs.
		kafkaProps.put("consumer.forcefromstart", "false"); // do not rewind to earliest offset
		kafkaProps.put("consumer.fetchsizebytes", "2097152"); // fetch size: 2 MB
		kafkaProps.put("consumer.fillfreqms", "250"); // fill frequency: 250 ms
		return kafkaProps;
	}

	/**
	 * Creates the streaming context with the tuning options common to all jobs.
	 *
	 * <p>{@code SparkConf.set} mutates and returns the same instance, so the
	 * un-chained statements below are equivalent to a fluent chain.
	 *
	 * @param batchInterval batch interval in milliseconds
	 * @return a new {@link JavaStreamingContext} built on the shared configuration
	 */
	protected JavaStreamingContext init_context(int batchInterval) {
		SparkConf sparkConf = new SparkConf();
		// Let Spark decide which RDDs to unpersist; helps GC behavior.
		sparkConf.set("spark.streaming.unpersist", "true");
		// Threads used when reduceByKeyAndWindow runs (default is 8).
		sparkConf.set("spark.default.parallelism", "30");
		sparkConf.set("spark.yarn.driver.memoryOverhead", "1024");
		sparkConf.set("spark.yarn.executor.memoryOverhead", "2048");
		sparkConf.set("spark.storage.memoryFraction", "0.5");
		// Kryo serialization with the project-specific registrator.
		sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
		sparkConf.set("spark.kryo.registrator", "com._58city.spark.app.kryo.Registrator");
		sparkConf.set("spark.storage.blockManagerHeartBeatMs", "120000");
		sparkConf.set("spark.streaming.receiver.writeAheadLog.enable", "false");
		// Spark execution context for the streaming job.
		return new JavaStreamingContext(sparkConf, new Duration(batchInterval));
	}

	/** Job body implemented by each concrete streaming application. */
	public abstract void run();

	/** Entry point: delegates to the subclass-provided {@link #run()}. */
	public void start() {
		run();
	}

}
