package com.shujia.spark.streaming

import java.util

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Durations, StreamingContext}
import redis.clients.jedis.Jedis
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe

import scala.collection.mutable


/**
  * Demo of Kafka direct-stream consumption with offsets managed manually in
  * Redis (instead of Kafka's auto-commit), giving at-least-once semantics:
  * records are processed first, then the batch's end offsets are persisted.
  */
object Demo7Direct {

  // Kafka topic consumed by this job (was duplicated as a raw literal three times).
  private val Topic: String = "test_topic2"
  // Consumer group id; also forms part of the Redis key that stores offsets.
  private val GroupId: String = "asdasd"
  // Redis endpoint used both to restore and to persist offsets.
  private val RedisHost: String = "master"
  private val RedisPort: Int = 6379

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("direct")
      .setMaster("local[4]")

    // Streaming context with a 5-second batch interval (how often a batch is computed).
    val ssc = new StreamingContext(conf, Durations.seconds(5))

    /**
      * auto.offset.reset semantics:
      *   earliest - consume from the committed offset when one exists; otherwise from the beginning
      *   latest   - consume from the committed offset when one exists; otherwise only new records
      *   none     - throw if any partition lacks a committed offset
      */
    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "master:9092,node1:9092,node2:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> GroupId,
      "auto.offset.reset" -> "earliest",
      // Offsets are committed manually to Redis below, so Kafka auto-commit stays off.
      "enable.auto.commit" -> "false"
    )

    val topics = Array(Topic)

    // Redis hash key: field = partition id, value = next offset to consume.
    val key = GroupId + ":" + Topic

    /**
      * Restore previously saved offsets from Redis. The connection is closed in
      * a finally block (the original leaked it). An empty result map simply
      * means "no saved state" and Subscribe falls back to auto.offset.reset.
      */
    val partitionOffset: Map[TopicPartition, Long] = {
      val redis = new Jedis(RedisHost, RedisPort)
      try {
        val saved: util.Map[String, String] = redis.hgetAll(key)
        import scala.collection.JavaConverters._
        saved.asScala.map { case (partition, offset) =>
          new TopicPartition(Topic, partition.toInt) -> offset.toLong
        }.toMap
      } finally {
        redis.close()
      }
    }

    println(partitionOffset)

    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams, partitionOffset)
    )

    stream.foreachRDD(rdd => {

      // Record processing runs on the executors.
      rdd.map(_.value()).foreach(println)

      // Offset ranges consumed by this batch (available only on the direct stream's RDDs).
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // One driver-side connection per batch; closed in finally so it no longer
      // leaks a connection every batch interval.
      val jedis = new Jedis(RedisHost, RedisPort)
      try {
        for (offsetRange <- offsetRanges) {
          println(offsetRange.topic + "\t" + offsetRange.partition + "\t" +
            offsetRange.fromOffset + "\t" + offsetRange.untilOffset)

          // Persist the end offset so a restart resumes after the data just
          // processed (saved AFTER processing => at-least-once delivery).
          jedis.hset(key, offsetRange.partition.toString, offsetRange.untilOffset.toString)
        }
      } finally {
        jedis.close()
      }

    })

    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }

}
