package com._58city.spark.app;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.spark.SparkConf;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

import com._58city.spark.app.dao.TrackValueTemp;
import com._58city.spark.app.ext.DaoUtil;
import com._58city.spark.app.ext.dto.DispCate;
import com._58city.spark.app.ext.dto.DispCity;
import com._58city.spark.app.mr.MrKafkaTrack4Compare;

/**
 * 新老机房流量对比
 * (Traffic comparison between the old and the new data center.)
 * 
 * @author guolu
 *
 */
public class KafkaTrack4Compare {

	/** Micro-batch interval, fixed at 2 seconds. */
	private static final int batchInterval = 2000;

	/**
	 * Entry point. Builds a Spark Streaming job that consumes a Kafka topic with
	 * {@code numStreams} parallel receivers, maps/reduces the records via
	 * {@link MrKafkaTrack4Compare}, and writes results through its foreachRDD sink.
	 *
	 * @param args args[0] = platform type (int), args[1] = kafka topic name,
	 *             args[2] = number of parallel receiver streams (int &gt; 0)
	 */
	@SuppressWarnings("deprecation")
	public static void main(String[] args) {
		// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 3) {
			System.err.println("Usage: KafkaTrack4Compare <platform_type> <kafka_topic> <numStreams>");
			System.exit(1);
		}
		int platform_type = Integer.parseInt(args[0]);
		String kafka_topic = args[1];
		int numStreams = Integer.parseInt(args[2]);
		// numStreams == 0 would leave mapStreams empty and crash later at get(0).
		if (numStreams <= 0) {
			throw new IllegalArgumentException("numStreams must be positive, got: " + numStreams);
		}

		AbstractApplicationContext context = new ClassPathXmlApplicationContext("application-context.xml");
		// Ensure the Spring context is closed (and its beans destroyed) on JVM exit.
		context.registerShutdownHook();
		DaoUtil.init(context);
		CacheUtil.init();

		Map<Long, DispCate> cate_map = CacheUtil.cateMap();
		Map<Long, DispCity> city_map = CacheUtil.cityMap();

		// Only the following domains are monitored:
		// every city's English name ([city].58.com) plus m.58.com, p.m.58.com,
		// post.58.com and passport.58.com.
		Set<String> cityNameSet = new HashSet<String>();
		for (Map.Entry<Long, DispCity> entry : city_map.entrySet()) {
			cityNameSet.add(entry.getValue().getCity_name_en());
		}

		cityNameSet.add("m");
		cityNameSet.add("p.m");
		cityNameSet.add("post");
		cityNameSet.add("passport");

		SparkConf conf = new SparkConf()
				// Let Spark unpersist finished RDDs eagerly; helps GC behaviour.
				.set("spark.streaming.unpersist", "true")
				// Threads used by reduceByKeyAndWindow etc. (default is 8).
				.set("spark.default.parallelism", "60")
				.set("spark.yarn.driver.memoryOverhead", "1024").set("spark.yarn.executor.memoryOverhead", "2048")
				.set("spark.storage.memoryFraction", "0.5")
				.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
				.set("spark.kryo.registrator", "com._58city.spark.app.kryo.Registrator");

		JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(batchInterval));

		// Broadcast the lookup tables once instead of shipping them with every task.
		Broadcast<Map<Long, DispCate>> bc_cate_map = jssc.sc().broadcast(cate_map);
		Broadcast<Set<String>> bc_city_set = jssc.sc().broadcast(cityNameSet);

		Map<String, Integer> map = new HashMap<String, Integer>();
		map.put(kafka_topic, 1);
		// One receiver-based input stream per requested degree of parallelism.
		List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<JavaPairDStream<String, String>>(numStreams);
		for (int i = 0; i < numStreams; i++) {
			kafkaStreams.add(KafkaUtils.createStream(jssc,
					"10.5.20.18:2181,10.5.20.100:2181,10.9.20.31:2181/opt/kafka", kafka_topic + "_group_temp", map,
					StorageLevel.MEMORY_AND_DISK_SER()));
		}

		MrKafkaTrack4Compare mr = new MrKafkaTrack4Compare(new String[] { "${platform}", "${busiLine}",
				"${machineRoom}" }, batchInterval, platform_type);
		mr.setBc_cate_map(bc_cate_map);
		mr.setBc_city_set(bc_city_set);

		List<JavaPairDStream<String, TrackValueTemp>> mapStreams = mr.mapPair(kafkaStreams);
		// Union all partial streams into one before reducing (no-op for a single stream).
		JavaPairDStream<String, TrackValueTemp> unionStream = null;
		if (mapStreams.size() > 1) {
			unionStream = jssc.union(mapStreams.get(0), mapStreams.subList(1, mapStreams.size()));
		} else {
			unionStream = mapStreams.get(0);
		}
		JavaPairDStream<String, TrackValueTemp> reduceStream = mr.reducePair(unionStream);
		mr.foreachRDD(reduceStream);

		jssc.start();
		jssc.awaitTermination();
	}
}
