package cn.lsh.spark.streaming.kafka;

import cn.lsh.kafka.util.OffsetUtil;
import kafka.common.TopicAndPartition;
import kafka.message.MessageAndMetadata;
import kafka.serializer.IntegerDecoder;
import kafka.serializer.StringDecoder;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.log4j.lf5.LogLevel;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.HasOffsetRanges;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.apache.spark.streaming.kafka.OffsetRange;
import scala.Tuple2;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Spark Streaming direct-mode Kafka consumer that manages its own offsets in ZooKeeper.
 *
 * <p>Startup: reads the latest producer offsets from the brokers and any previously
 * committed consumer offsets from ZooKeeper; consumer offsets win when present.
 * Per batch: commits the batch's offset ranges back to ZooKeeper.
 *
 * <p>NOTE(review): offsets are committed by an output operation registered BEFORE the
 * {@code print} output operation, i.e. before the batch's results are materialized —
 * this is at-most-once delivery. Commit after processing for at-least-once.
 */
public class SparkStreamingOnKafkaDirectedOffset {

	public static void main(String[] args) throws InterruptedException {
		String topic = "stream_2";
		// NOTE(review): "streaimg" looks like a typo for "streaming", but this string is
		// also the ZooKeeper offset path for the group — renaming it would orphan any
		// previously committed offsets. Fix only together with an offset migration.
		String consumerGroup = "spark-streaimg-kafka";
		String brokerServer = "node00:9092,node01:9092,node02:9092";
		String zookeeper = "node01:2181,node02:2181,node03:2181";

		// Latest offsets per partition as seen by the brokers (the "start from newest" default).
		Map<TopicAndPartition, Long> produceOffset = OffsetUtil.getTopicOffsetsByProducer(brokerServer, topic);
		// Offsets this consumer group previously committed to ZooKeeper, if any.
		Map<TopicAndPartition, Long> consumeOffset = OffsetUtil.getTopicOffsetsByConsumerForZk(zookeeper, consumerGroup, topic);

		// Prefer the committed consumer offsets; fall back to the producer (latest) offsets
		// for any partition without a committed offset.
		if (!consumeOffset.isEmpty()) {
			produceOffset.putAll(consumeOffset);
		}
		// To re-read a topic from the beginning, overwrite every entry's value with 0L here.

		JavaStreamingContext streamContext = createStreamContext(produceOffset, brokerServer, zookeeper, consumerGroup);
		streamContext.sparkContext().setLogLevel(LogLevel.WARN.getLabel());
		streamContext.start();
		streamContext.awaitTermination();
	}

	/**
	 * Builds a local streaming context that consumes the given topic-partitions starting
	 * at {@code topicOffsets}, prints the (key, value) pairs, and commits each batch's
	 * offset ranges to ZooKeeper.
	 *
	 * @param topicOffsets  start offset per topic-partition
	 * @param brokerServer  Kafka bootstrap servers (host:port, comma-separated)
	 * @param zookeeper     ZooKeeper quorum used for offset storage
	 * @param consumerGroup consumer group name (also the ZK offset path)
	 * @return an unstarted {@link JavaStreamingContext}; caller invokes {@code start()}
	 */
	public static JavaStreamingContext createStreamContext(Map<TopicAndPartition, Long> topicOffsets, String brokerServer, String zookeeper, String consumerGroup) {
		SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("SparkStreamingOnKafkaDirectedOffset");
		// Direct-mode rate cap: read at most 10 records per partition per second.
		conf.set("spark.streaming.kafka.maxRatePerPartition", "10");
		JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(5));

		Map<String, String> kafkaConf = new HashMap<>();
		kafkaConf.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerServer);

		// Message handler: by default createDirectStream would surface only the value;
		// encode both as "key_value" so the key survives into the stream.
		Function<MessageAndMetadata<Integer, String>, String> messageHandler = new Function<MessageAndMetadata<Integer, String>, String>() {

			private static final long serialVersionUID = 5379138132507932722L;

			@Override
			public String call(MessageAndMetadata<Integer, String> message) throws Exception {
				return message.key() + "_" + message.message();
			}
		};

		// Direct stream starting exactly at the supplied offsets.
		JavaInputDStream<String> directStream = KafkaUtils.createDirectStream(jsc,
				Integer.class,
				String.class,
				IntegerDecoder.class,
				StringDecoder.class,
				String.class,
				kafkaConf,
				topicOffsets,
				messageHandler);

		// Commit each batch's offset ranges straight from the batch RDD. Reading the
		// ranges here (instead of ferrying them out of a transform via shared mutable
		// state) removes any dependency on Spark's job-generation ordering: only the
		// KafkaRDD produced by the direct stream itself implements HasOffsetRanges.
		directStream.foreachRDD(rdd -> {
			OffsetRange[] ranges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
			OffsetUtil.saveTopicOffsetsToZk(zookeeper, consumerGroup, ranges);
		});

		// Decode "key_value" back into a pair. The limit-2 split is essential: the
		// message value may itself contain '_', and split("_")[1] would truncate it.
		// NOTE(review): a null Kafka key becomes the literal "null" here and parseInt
		// will throw — confirm upstream always sets an integer key.
		JavaPairDStream<Integer, String> pairDStream = directStream.mapToPair(s -> {
			String[] kv = s.split("_", 2);
			return new Tuple2<>(Integer.parseInt(kv[0]), kv[1]);
		});
		pairDStream.print(1000);
		return jsc;
	}
}
