package com.pw.study.realtime

import com.pw.study.common.constants.TopicConstant
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object RunAPP {

  /**
   * Entry point: consumes startup-log records from Kafka via a direct
   * stream, prints each batch's values, and — because
   * `enable.auto.commit` is false — commits the consumed offsets back to
   * Kafka explicitly, only after the batch has been processed
   * (at-least-once semantics).
   */
  def main(args: Array[String]): Unit = {
    // Spark application configuration.
    val appName = "spark"
    val master = "local[4]"
    val conf = new SparkConf()
      .setAppName(appName)
      .setMaster(master)
      .set("spark.testing.memory", "4718592000")
    val batchSeconds = 10
    val groupId = "custom2"

    // 1. Initialize the streaming context with a 10-second batch interval.
    val streamingContext = new StreamingContext(conf, Seconds(batchSeconds))
    streamingContext.sparkContext.setLogLevel("error")
    val topics = Array(TopicConstant.STARTUP_LOG)

    // Kafka consumer settings. Auto-commit is disabled so offsets are
    // committed manually after each batch is handled.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop112:9092,hadoop113:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> "false"
    )

    val ds = KafkaUtils.createDirectStream(
      streamingContext,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    // Print a sample of each batch's record values on the driver.
    ds.map(_.value()).print()

    // Read the offset ranges directly from the source KafkaRDD inside
    // foreachRDD (the documented integration pattern) instead of passing
    // them through a mutable, null-initialized driver-side var. Only the
    // RDD produced by the direct stream itself carries HasOffsetRanges.
    ds.foreachRDD(rdd => {
      val ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd.map(_.value()).foreach(println(_))
      // mkString so the actual ranges are printed, not Array's toString.
      println(s"ranges: ${ranges.mkString(", ")}")
      // Commit the exact ranges of this batch back to Kafka.
      ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
    })

    // Start the streaming job.
    streamingContext.start()
    // Block the main thread until the job is terminated.
    streamingContext.awaitTermination()

  }

}
