package com.shujia.streeam

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
  * Spark Streaming Kafka direct-mode consumer with manual offset management.
  *
  * In direct mode each batch job pulls data straight from the brokers when it
  * starts computing; offsets are NOT stored in ZooKeeper and must be managed
  * by the application itself. Here they are persisted to HBase via
  * `OffsetToHbaseUtil`, giving at-least-once semantics: offsets are written
  * only AFTER the batch's processing completes, so a failure between
  * processing and the offset update replays that batch.
  */
object Demo9OnkafkaDirect {
  def main(args: Array[String]): Unit = {
    // App name fixed: it previously said "Demo8SSCOnKafka", left over from a
    // copy-paste of the previous demo and inconsistent with this object.
    val conf = new SparkConf().setMaster("local[4]").setAppName("Demo9OnkafkaDirect")

    val sc = new SparkContext(conf)

    // 5-second micro-batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Kafka consumer parameters (0.8-style API: broker list, not bootstrap.servers).
    val kafkaParams = Map(
      "metadata.broker.list" -> "node2:9092,node3:9092,node4:9092"
      //"auto.offset.reset" -> "smallest"
    )

    val topics = Set("student")

    /**
      * Direct mode: each job pulls data from the brokers only when the batch
      * starts computing. Offsets are no longer kept in ZooKeeper and must be
      * managed by the application. The variant below lets Kafka/checkpointing
      * pick the starting offsets; it is kept for reference.
      */
    /*val ds: InputDStream[(String, String)] = KafkaUtils.createDirectStream[
      String,
      String,
      StringDecoder,
      StringDecoder](
      ssc,
      kafkaParams,
      topics
    )*/

    // Consume from explicitly specified offsets instead.

    val groupId = "asdaasaasdsdda"
    // Create the HBase table that stores this group's offsets (if absent).
    OffsetToHbaseUtil.createTable(groupId)

    // Read the starting offset for each of the topic's partitions
    // (topic "student", 3 partitions) from HBase.
    val fromOffsets = OffsetToHbaseUtil.getOffset(groupId, "student", 3)

    // Message handler: keep only the message value, drop key/metadata.
    val messageHandler = (mmd: MessageAndMetadata[String, String]) => mmd.message

    val ds = KafkaUtils.createDirectStream[
      String,
      String,
      StringDecoder,
      StringDecoder,
      String
      ](ssc, kafkaParams, fromOffsets, messageHandler)

    ds.foreachRDD(rdd => {

      // Business logic for the batch (here: just count the records).
      val count = rdd.count()

      println("当前batch处理的数据量  " + count)

      /**
        * Persist the consumed offsets to an external store (HBase).
        *
        * NOTE: this cast is only valid on the RDD produced directly by
        * createDirectStream (a KafkaRDD); any transformation before
        * foreachRDD would lose HasOffsetRanges.
        */
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      offsetRanges.foreach(o => {
        // Store untilOffset so the next run resumes after this batch's data.
        OffsetToHbaseUtil.updataOffset(groupId, o.topic, o.partition, o.untilOffset)
        println(s"${o.topic}\t${o.partition}\t${o.fromOffset}\t${o.untilOffset}")
      })

    })

    ssc.start()
    // Blocks until the streaming context is stopped or fails.
    ssc.awaitTermination()
    ssc.stop()
  }
}
