package com.lvmama.java.rhino.etl.core;

import java.util.Map;

import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import com.lvmama.java.rhino.spark.core.kafka.IKafkaSender;
import com.lvmama.java.rhino.spark.utils.Constants;

import scala.Tuple2;

/**
 * Template that encapsulates the logic of pulling data from Kafka into a
 * Spark Streaming job. Concrete subclasses declare which topics to consume
 * via {@link #getKafkaTopic()}.
 *
 * @author wxliyong
 */
public abstract class AbstractKafkaSparkStreamingTemplate extends AbstractSparkStreamingTemplate {
	private static final long serialVersionUID = -3104255947862379044L;

	// Optional sender that subclasses may use to publish results back to Kafka.
	protected IKafkaSender kafkaProducer;

	/**
	 * Builds the input stream by attaching a receiver-based Kafka source to the
	 * given streaming context and unwrapping every (key, value) record into its
	 * value string.
	 *
	 * @param jssc streaming context the Kafka receiver is attached to
	 * @return a DStream of raw Kafka message payloads
	 */
	@Override
	public JavaDStream<String> createJavaDStream(JavaStreamingContext jssc) {
		// ZooKeeper connection string and consumer group come from the shared configuration.
		String zookeeperList = Constants.getInstance().getValue("client.service.kafka.zookeeper.server.list");
		String consumerGroup = Constants.getInstance().getValue("client.service.kafka.group.id");
		// Topic name -> receiver thread count, supplied by the concrete subclass.
		Map<String, Integer> topics = getKafkaTopic();
		// Attach the receiver-based Kafka input stream to the context.
		JavaPairReceiverInputDStream<String, String> records =
				KafkaUtils.createStream(jssc, zookeeperList, consumerGroup, topics);
		// Keep only the message payload; the Kafka key is not used downstream.
		return records.map(new Function<Tuple2<String, String>, String>() {
			private static final long serialVersionUID = -5682708005814345978L;
			@Override
			public String call(Tuple2<String, String> record) {
				return record._2();
			}
		});
	}

	/**
	 * Returns the Kafka topics to listen on.
	 *
	 * @return mapping of topic name to number of consumer threads
	 */
	public abstract Map<String, Integer> getKafkaTopic();

	public IKafkaSender getKafkaProducer() {
		return kafkaProducer;
	}

	public void setKafkaProducer(IKafkaSender kafkaProducer) {
		this.kafkaProducer = kafkaProducer;
	}
}
