package com.offset

import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Duration, Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.KafkaUtils

/**
  * checkpoint解决消费多次的问题
  *   todo 再次运行OffsetApp01，会出现什么情况？为什么？
  */

object OffsetApp02 {

  def main(args: Array[String]): Unit = {

    // Local 6-core run; spark.testing.memory is raised so Spark's
    // minimum-memory sanity check passes in a small test environment.
    val conf = new SparkConf()
      .setAppName("OffsetApp02") // was "": an unnamed app is hard to find in the Spark UI
      .setMaster("local[6]")
      .set("spark.testing.memory", "500000000")

    /*
      auto.offset.reset defines the consumer's behavior when no initial
      offset is found, or when the stored offset is out of range:

      - before Kafka 0.10.1.x: valid values are smallest / largest
        (offsets are stored in ZooKeeper)
      - from Kafka 0.10.1.x onward: valid values are
          earliest: start from the oldest available offset
          latest:   start from the newest offset
          none:     throw an exception when no committed offset exists
        (offsets are stored in the internal topic __consumer_offsets)

      https://blog.csdn.net/lishuangzhe7047/article/details/74530417
     */
    val kafkaParams = Map[String, String](
      "metadata.broker.list" -> "shizhonghao:9092",
      "auto.offset.reset" -> "smallest"
    )

    // Comma-separated topic list -> Set, as required by createDirectStream.
    val topics = "TestTopic".split(",").toSet

    val checkpointDirectory = "data/"

    /**
      * Builds a fresh StreamingContext with a Kafka direct stream attached.
      * Only invoked by getOrCreate when no usable checkpoint exists in
      * `checkpointDirectory`; on restart the context (including this DStream
      * graph and the consumed offsets) is restored from the checkpoint instead.
      */
    def functionToCreateContext(): StreamingContext = {
      val ssc = new StreamingContext(conf, Seconds(10)) // 10-second batches

      val message = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)

      ssc.checkpoint(checkpointDirectory) // set checkpoint directory
      // Checkpoint the stream every batch so offsets survive a restart
      // and records are not re-consumed.
      message.checkpoint(Duration(10 * 1000))
      message.foreachRDD { rdd =>
        // Skip empty batches to avoid a useless Spark job per interval.
        if (!rdd.isEmpty()) {
          println("rddCount: " + rdd.count())
        }
      }
      ssc
    }

    // Recover the context from the checkpoint if one exists,
    // otherwise create a brand-new one.
    val ssc = StreamingContext.getOrCreate(checkpointDirectory, functionToCreateContext _)

    ssc.start()
    ssc.awaitTermination()
  }
}
