package com._58city.spark.consumer.kafka;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com._58city.spark.consumer.kafka.client.KafkaRangeReceiver;
import com._58city.spark.consumer.kafka.client.KafkaReceiver;

/**
 * Entry point for launching Kafka low-level (simple) consumer receivers as
 * Spark Streaming receiver-based input streams.
 *
 * <p>Decides how many receivers to create from the configured Kafka partition
 * count and distributes partitions across them.
 *
 * @author huangliang
 */
public class ReceiverLauncher implements Serializable {
	public static final Logger LOG = LoggerFactory.getLogger(ReceiverLauncher.class);
	private static final long serialVersionUID = -3008388663855819086L;

	/**
	 * Creates the receiver streams and returns them as a list, one stream per
	 * receiver, without unioning them.
	 *
	 * @param jsc               the Spark streaming context
	 * @param pros              consumer configuration properties
	 * @param numberOfReceivers requested number of receivers
	 * @param storageLevel      Spark storage level for received blocks
	 * @return one {@link JavaDStream} per created receiver
	 */
	public static List<JavaDStream<MessageAndMetadata>> launch_batch(JavaStreamingContext jsc,
			Properties pros,
			int numberOfReceivers, StorageLevel storageLevel) {

		return createStream(jsc, pros, numberOfReceivers, storageLevel);
	}

	/**
	 * Creates the receiver streams and unions them into a single stream.
	 *
	 * @param jsc               the Spark streaming context
	 * @param pros              consumer configuration properties
	 * @param numberOfReceivers requested number of receivers
	 * @param storageLevel      Spark storage level for received blocks
	 * @return a single {@link JavaDStream} covering all receivers
	 */
	public static JavaDStream<MessageAndMetadata> launch(
			JavaStreamingContext jsc, Properties pros, int numberOfReceivers,
			StorageLevel storageLevel) {

		List<JavaDStream<MessageAndMetadata>> streamsList =
				createStream(jsc, pros, numberOfReceivers, storageLevel);

		// Union all the streams only when there is more than one.
		if (streamsList.size() > 1) {
			return jsc.union(streamsList.get(0), streamsList.subList(1, streamsList.size()));
		}
		return streamsList.get(0);
	}

	/**
	 * Builds the receiver streams.
	 *
	 * <p>The Kafka partition count is taken from the
	 * {@code Config.KAFKA_PARTITIONS_NUMBER} property when present; otherwise
	 * it is read from ZooKeeper. When {@code numberOfReceivers >= partitions},
	 * one {@link KafkaReceiver} is created per partition. Otherwise partitions
	 * are distributed round-robin over {@code numberOfReceivers}
	 * {@link KafkaRangeReceiver}s.
	 *
	 * @param jsc               the Spark streaming context
	 * @param pros              consumer configuration properties
	 * @param numberOfReceivers requested number of receivers
	 * @param storageLevel      Spark storage level for received blocks
	 * @return the created receiver streams
	 */
	private static List<JavaDStream<MessageAndMetadata>> createStream(
			JavaStreamingContext jsc, Properties pros, int numberOfReceivers,
			StorageLevel storageLevel) {

		List<JavaDStream<MessageAndMetadata>> streamsList =
				new ArrayList<JavaDStream<MessageAndMetadata>>();

		int numberOfPartition; // Kafka partition count

		KafkaConfig kafkaConfig = new KafkaConfig(pros);

		LOG.info("====== numberOfReceivers : {}", numberOfReceivers);

		// getProperty already returns String; no cast needed.
		String numberOfPartitionStr = pros.getProperty(Config.KAFKA_PARTITIONS_NUMBER);

		if (numberOfPartitionStr != null) {
			numberOfPartition = Integer.parseInt(numberOfPartitionStr);
		} else {
			// Partition count not configured: look it up in ZooKeeper.
			// NOTE(review): the ZkState / Curator client created here is never
			// closed — confirm whether it should be released after this lookup.
			ZkState zkState = new ZkState(kafkaConfig);
			String zkPath = (String) kafkaConfig._stateConf.get(Config.ZOOKEEPER_BROKER_PATH);
			String topic = (String) kafkaConfig._stateConf.get(Config.KAFKA_TOPIC);
			numberOfPartition = getNumPartitions(zkState, zkPath, topic);
			LOG.info("====== topic : {}, numberOfPartition : {}", topic, numberOfPartition);
		}

		/*
		 * Decide the receiver layout from the partition count and the requested
		 * receiver count:
		 *  - receivers >= partitions: one receiver per partition (extra
		 *    receivers are not created);
		 *  - receivers <  partitions: partitions are spread round-robin across
		 *    the receivers, each handling a set of partitions.
		 */
		if (numberOfReceivers >= numberOfPartition) {
			for (int i = 0; i < numberOfPartition; i++) {
				LOG.info("====== Create as many Receiver as Partition : {}", i);
				streamsList.add(jsc.receiverStream(new KafkaReceiver(pros, i, storageLevel)));
			}
		} else {
			LOG.info("====== Create Range Receivers..");
			// receiver index -> set of partition ids assigned to it
			Map<Integer, Set<Integer>> rMap = new HashMap<Integer, Set<Integer>>();
			for (int i = 0; i < numberOfPartition; i++) {
				int receiverIndex = i % numberOfReceivers;
				Set<Integer> pSet = rMap.get(receiverIndex);
				if (pSet == null) {
					pSet = new HashSet<Integer>();
					rMap.put(receiverIndex, pSet);
				}
				pSet.add(i);
			}
			for (int i = 0; i < numberOfReceivers; i++) {
				streamsList.add(jsc.receiverStream(
						new KafkaRangeReceiver(pros, rMap.get(i), storageLevel)));
			}
		}
		return streamsList;
	}

	/**
	 * Reads the partition count of a topic from ZooKeeper by counting the
	 * children of its partitions node.
	 *
	 * @param zkState ZooKeeper connection state holding the Curator client
	 * @param zkPath  ZooKeeper broker base path
	 * @param topic   Kafka topic name
	 * @return the number of partitions
	 * @throws RuntimeException wrapping any ZooKeeper/Curator failure
	 */
	private static int getNumPartitions(ZkState zkState, String zkPath, String topic) {
		try {
			String topicBrokersPath = partitionPath(zkPath, topic);
			List<String> children = zkState.getCurator().getChildren().forPath(topicBrokersPath);
			return children.size();
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/** Builds the ZooKeeper path of a topic's partitions node. */
	private static String partitionPath(String zkPath, String topic) {
		return zkPath + "/topics/" + topic + "/partitions";
	}

}
