package com.streaming.kafka.offset

import java.util.Date

import scala.util.control.NonFatal

import com.fasterxml.jackson.databind.deser.std.StringDeserializer
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}

object DSHS {

  /**
    * Restores Kafka offsets from a checkpoint so that no data is lost
    * when the streaming job restarts.
    *
    * @param args optional single argument: the batch (slide) interval in
    *             seconds; defaults to 1 second when absent.
    */
  def main(args: Array[String]): Unit = {

    // Timestamp format used only for console logging.
    val timeFormat = FastDateFormat.getInstance("yyyy/MM/dd HH:mm:ss.SSS")

    println("Starting Time" + timeFormat.format(new Date()))

    // Batch (slide) interval of the stream: 1s by default, overridable
    // via the first command-line argument. (Was a `var` mutated in an
    // `if`; rewritten as a single `val` expression.)
    val slideInterval =
      if (args.length == 1) Seconds(args(0).toLong)
      else Seconds(1)

    val bootstrapServers = "localhost:9092" // Kafka broker address

    try {
      // Create the SparkSession, then a StreamingContext with the chosen
      // batch interval. FIX: master URL was misspelled "loal[2]".
      val spark = SparkSession.builder()
        .appName("") // TODO: give the application a real name
        .master("local[2]")
        .getOrCreate()

      val streamingContext = new StreamingContext(spark.sparkContext, slideInterval)

      // Kafka consumer configuration.
      // FIX: the deserializers must be Kafka's StringDeserializer
      // (org.apache.kafka.common.serialization.StringDeserializer), not the
      // Jackson class imported at the top of this file. Kafka accepts the
      // class name as a String, which sidesteps the clashing import.
      val kafkaParameters = Map[String, Object](
        "bootstrap.servers" -> bootstrapServers,
        "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
        "group.id" -> "use_a_separate_group_id_for_each_stream",
        "auto.offset.reset" -> "latest",
        // Offsets are meant to be committed manually after processing
        // (the point of this job), so auto-commit stays off.
        "enable.auto.commit" -> (false: java.lang.Boolean),
        "max.partition.fetch.bytes" -> (2621440: java.lang.Integer), // default: 1048576
        "request.timeout.ms" -> (90000: java.lang.Integer),          // default: 60000
        "session.timeout.ms" -> (60000: java.lang.Integer)           // default: 30000
      )

      // Topics to subscribe to. TODO: fill in the real topic name(s).
      val topics = Array("")

      // Input stream — left unfinished in the original source. The stray
      // closing parenthesis that followed the old comment was a syntax
      // error and has been removed. Completing this requires the
      // spark-streaming-kafka-0-10 artifact:
      //
      // val directKafkaStream = KafkaUtils.createDirectStream[String, String](
      //   streamingContext,
      //   PreferConsistent,
      //   Subscribe[String, String](topics, kafkaParameters)
      // )
      //
      // streamingContext.start()
      // streamingContext.awaitTermination()
    } catch {
      // NonFatal only: let OOM / interrupts / control throwables propagate.
      case NonFatal(e) =>
        println(s"Streaming job failed: ${e.getMessage}")
        e.printStackTrace()
    }
  }
}
