package cn.lsh.spark.streaming.kafka;

import org.apache.log4j.lf5.LogLevel;
import org.apache.spark.SparkConf;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import java.util.HashMap;
import java.util.Map;

/**
 * Spark Streaming + Kafka in receiver mode (legacy approach; rarely used now —
 * the direct/Kafka-direct API is generally preferred).
 */
public class SparkStreamingOnKafkaReceiver {

	/**
	 * Starts a local receiver-mode Kafka stream and prints each micro-batch.
	 *
	 * @param args unused command-line arguments
	 * @throws InterruptedException if the streaming context is interrupted while awaiting termination
	 */
	public static void main(String[] args) throws InterruptedException {
		SparkConf conf = new SparkConf();
		conf.setMaster("local[2]").setAppName("SparkStreamingOnKafkaReceiver")
				// Enable the write-ahead log (WAL) so received data survives driver failure.
				.set("spark.streaming.receiver.writeAheadLog.enable", "true");
		// Receiver mode needs at least two local threads: one for the receiver, one for processing.
		JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(5));
		jsc.sparkContext().setLogLevel(LogLevel.WARN.getLabel());
		// The WAL requires a checkpoint directory to persist received blocks.
		jsc.checkpoint("file:/receiver_data");
		String consumerGroup = "spark-streaming-kafka";
		String zookeeper = "node01:2181,node02:2181,node03:2181";
		// Topic name -> number of receiver threads consuming that topic.
		Map<String, Integer> topicConsumerConcurrency = new HashMap<>();
		topicConsumerConcurrency.put("stream_1", 1);
		// topicConsumerConcurrency.put("stream_2", 2);
		// NOTE: the second argument is the ZooKeeper quorum; the last (optional) argument is the
		// receiver storage level (default MEMORY_AND_DISK_SER_2). With the WAL enabled above,
		// in-memory replication is redundant — the data is already durable in the log — so use
		// the non-replicated MEMORY_AND_DISK_SER per the Spark Streaming fault-tolerance guide.
		JavaPairReceiverInputDStream<String, String> stream = KafkaUtils.createStream(jsc, zookeeper, consumerGroup, topicConsumerConcurrency, StorageLevel.MEMORY_AND_DISK_SER());
		stream.print();
		jsc.start();
		jsc.awaitTermination();
	}
}
