package com._58city.spark.app;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

import com._58city.spark.app.dao.TrackValue;
import com._58city.spark.app.ext.DaoUtil;
import com._58city.spark.app.mr.MrKafkaTrack;


/**
 * Spark Streaming driver that consumes a Kafka topic through several parallel
 * receiver streams, maps each record to a (key, {@link TrackValue}) pair,
 * unions the per-receiver streams, reduces them and hands the result to
 * {@link MrKafkaTrack#foreachRDD} for output.
 */
public class KafkaTrackStreaming {

	/** Fixed micro-batch interval in milliseconds (2-second slices). */
	private static final int BATCH_INTERVAL_MS = 2000;

	/**
	 * Entry point.
	 *
	 * @param args [0] platform type (int), passed through to {@link MrKafkaTrack};
	 *             [1] Kafka topic name to consume;
	 *             [2] number of parallel Kafka receiver streams (must be &gt; 0)
	 */
	public static void main(String[] args) {
		// Fail fast with a usage message instead of an opaque
		// ArrayIndexOutOfBoundsException when arguments are missing.
		if (args.length < 3) {
			System.err.println("Usage: KafkaTrackStreaming <platformType> <kafkaTopic> <numStreams>");
			System.exit(1);
		}
		int platformType = Integer.parseInt(args[0]);
		String kafkaTopic = args[1];
		int numStreams = Integer.parseInt(args[2]);
		if (numStreams <= 0) {
			// Without this check, an empty stream list later fails with
			// IndexOutOfBoundsException at mapStreams.get(0).
			throw new IllegalArgumentException("numStreams must be positive, got " + numStreams);
		}

		// Bootstrap Spring context and project-level utilities (DAO layer, caches).
		AbstractApplicationContext context = new ClassPathXmlApplicationContext("application-context.xml");
		DaoUtil.init(context);
		CacheUtil.init();

		SparkConf conf = new SparkConf()
				.set("spark.streaming.unpersist", "true") // let Spark unpersist stale RDDs automatically; improves GC behavior
				.set("spark.default.parallelism", "60") // task count for reduceByKeyAndWindow etc. (default is 8)
				.set("spark.yarn.driver.memoryOverhead", "1024") // driver off-heap memory (MB)
				.set("spark.yarn.executor.memoryOverhead", "2048") // executor off-heap memory (MB)
				.set("spark.storage.memoryFraction", "0.5")
				.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
				.set("spark.kryo.registrator", "com._58city.spark.app.kryo.Registrator");

		JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(BATCH_INTERVAL_MS));

		// One receiver thread per created stream; parallelism comes from
		// creating numStreams independent receiver streams below.
		Map<String, Integer> topicThreads = new HashMap<String, Integer>();
		topicThreads.put(kafkaTopic, 1);
		Map<String, String> kafkaParams = new HashMap<String, String>();
		kafkaParams.put("group.id", "ecdata_test");
		kafkaParams.put("auto.offset.reset", "largest"); // start from the newest offsets when no committed offset exists
		kafkaParams.put("zookeeper.connect", "10.126.99.105:2181,10.126.99.196:2181,10.126.81.208:2181,10.126.100.144:2181,10.126.81.215:2181/58_kafka_cluster");

		List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<JavaPairDStream<String, String>>(numStreams);
		for (int i = 0; i < numStreams; i++) {
			kafkaStreams.add(KafkaUtils.createStream(jssc, String.class, String.class,
					kafka.serializer.StringDecoder.class, kafka.serializer.StringDecoder.class,
					kafkaParams, topicThreads, StorageLevel.MEMORY_AND_DISK_SER()));
		}

		MrKafkaTrack mr = new MrKafkaTrack(new String[] { "${platform}", "${busiLine}" }, BATCH_INTERVAL_MS, platformType);

		List<JavaPairDStream<String, TrackValue>> mapStreams = mr.mapPair(kafkaStreams);
		// Union only when there is more than one stream; union of a single
		// stream is a needless extra stage.
		JavaPairDStream<String, TrackValue> unionStream;
		if (mapStreams.size() > 1) {
			unionStream = jssc.union(mapStreams.get(0), mapStreams.subList(1, mapStreams.size()));
		} else {
			unionStream = mapStreams.get(0);
		}
		JavaPairDStream<String, TrackValue> reduceStream = mr.reducePair(unionStream);
		mr.foreachRDD(reduceStream);

		jssc.start();
		jssc.awaitTermination(); // block the driver until the streaming job is stopped
	}

}
