package com._58city.spark.app;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

import com._58city.spark.app.dao.IMCInfoAddValue;
import com._58city.spark.app.ext.DaoUtil;
import com._58city.spark.app.ext.dto.BelongCate;
import com._58city.spark.app.ext.dto.ImcInfoSource;
import com._58city.spark.app.mr.MrKafkaIMCInfoAdd;

public class KafkaImcInfoAddStreaming {

	/** Micro-batch slice interval in milliseconds (fixed at 2 seconds). */
	private static final int batchInterval = 2000;

	/** Default ZooKeeper connect string for the Kafka cluster (used when no 4th arg is given). */
	private static final String DEFAULT_ZK_CONNECT =
			"10.126.99.105:2181,10.126.99.196:2181,10.126.81.208:2181,10.126.100.144:2181,10.126.81.215:2181/58_kafka_cluster";

	/**
	 * Driver entry point for the IMC-info-add streaming job.
	 *
	 * <p>Required arguments:
	 * <ol>
	 *   <li>kafka_topic — Kafka topic name to consume</li>
	 *   <li>group_id — Kafka consumer group id (e.g. {@code ecdata_group})</li>
	 *   <li>num_streams — number of receiver (Input DStream) threads to start; must be &gt;= 1</li>
	 * </ol>
	 * Optional argument:
	 * <ol start="4">
	 *   <li>zk_connect — ZooKeeper connect string; defaults to {@link #DEFAULT_ZK_CONNECT}</li>
	 * </ol>
	 *
	 * @param args command-line arguments as described above
	 */
	@SuppressWarnings("deprecation")
	public static void main(String[] args) {
		// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 3) {
			System.err.println(
					"Usage: KafkaImcInfoAddStreaming <kafka_topic> <group_id> <num_streams> [zk_connect]");
			System.exit(1);
		}
		String kafka_topic = args[0]; // Kafka topic name
		String groupID = args[1];
		int numStreams = Integer.parseInt(args[2]); // number of Input DStream receivers to start
		if (numStreams < 1) {
			// Guard: mapStreams.get(0) below would fail with an obscure error for numStreams <= 0.
			throw new IllegalArgumentException("num_streams must be >= 1, got " + numStreams);
		}
		String zkConnect = args.length > 3 ? args[3] : DEFAULT_ZK_CONNECT;

		AbstractApplicationContext context = new ClassPathXmlApplicationContext("application-context.xml");
		// Ensure the Spring context is closed (beans disposed) when the JVM shuts down.
		context.registerShutdownHook();
		DaoUtil.init(context); // Initialize DAOs; mainly syncs a few dictionary tables.
		CacheUtil.init(); // Initialize Redis; mainly used to store aggregated results.

		SparkConf conf = new SparkConf()
				.set("spark.streaming.unpersist", "true")
				// Let Spark decide which RDDs to unpersist, which helps GC behavior.
				.set("spark.default.parallelism", "8")
				// Number of tasks used by reduceByKeyAndWindow; default is 8.
				.set("spark.yarn.driver.memoryOverhead", "1024")
				.set("spark.yarn.executor.memoryOverhead", "2048")
				.set("spark.storage.memoryFraction", "0.5")
				.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
				.set("spark.kryo.registrator", "com._58city.spark.app.kryo.Registrator");

		JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(batchInterval));

		// Broadcast the dictionary tables to every node in the cluster.
		Map<Long, BelongCate> belongCateMap = CacheUtil.belongCateMap();
		Map<String, ImcInfoSource> imcSourceMap = CacheUtil.imcInfoSourceMap();

		Broadcast<Map<Long, BelongCate>> bcBelongCateMap = jssc.sc().broadcast(belongCateMap);
		Broadcast<Map<String, ImcInfoSource>> bcImcSourceMap = jssc.sc().broadcast(imcSourceMap);

		// Consume data from Kafka (receiver-based API; one thread per topic entry).
		Map<String, Integer> map = new HashMap<String, Integer>();
		map.put(kafka_topic, 1);
		Map<String, String> kafkaParams = new HashMap<String, String>();
		kafkaParams.put("group.id", groupID);
		// Maximum message size the consumer will fetch (20 MB).
		kafkaParams.put("fetch.message.max.bytes", String.valueOf(20 * 1024 * 1024));
		kafkaParams.put("auto.offset.reset", "largest");
		kafkaParams.put("zookeeper.connect", zkConnect);
		List<JavaPairDStream<String, String>> kafkaStreams =
				new ArrayList<JavaPairDStream<String, String>>(numStreams);
		for (int i = 0; i < numStreams; i++) {
			kafkaStreams.add(KafkaUtils.createStream(jssc, String.class, String.class,
					kafka.serializer.StringDecoder.class, kafka.serializer.StringDecoder.class, kafkaParams, map,
					StorageLevel.MEMORY_AND_DISK_SER()));
		}

		/*
		 * Helper that performs the map/reduce/foreach operations.
		 * Arg 1: property names used to build the key; Arg 2: batch interval in ms.
		 */
		MrKafkaIMCInfoAdd mr = new MrKafkaIMCInfoAdd(new String[] { "${platform}", "${busiLine}", "${belong_cate1}",
				"${belong_cate2}" }, batchInterval);

		mr.setBcBelongCateMap(bcBelongCateMap);
		mr.setBcImcSourceMap(bcImcSourceMap);

		// MAP phase.
		List<JavaPairDStream<String, IMCInfoAddValue>> mapStreams = mr.mapPair(kafkaStreams);

		// Union all receiver streams into a single DStream.
		JavaPairDStream<String, IMCInfoAddValue> unionStream = null;
		if (mapStreams.size() > 1) {
			unionStream = jssc.union(mapStreams.get(0), mapStreams.subList(1, mapStreams.size()));
		} else {
			unionStream = mapStreams.get(0);
		}

		// REDUCE phase.
		JavaPairDStream<String, IMCInfoAddValue> reduceStream = mr.reducePair(unionStream);

		// Output handling for the aggregated results.
		mr.foreachRDD(reduceStream);

		jssc.start();
		jssc.awaitTermination();
	}

}
