package cn.ipanel.bigdata.job.realtime

import cn.ipanel.bigdata.boot.Job
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}

/**
 * @author lzz
 * @environment IntelliJ IDEA 2020.3.1
 * @projectName bigdata_shanxi
 * @date 2023/03/28 16:52
 * @description:
 */
class RealtimeTest extends Job {

  /**
   * Starts a local Spark Streaming job that consumes the `realtime-test`
   * Kafka topic with a 10-second batch interval and prints every record's
   * key, value, offset, partition, timestamp and headers for debugging.
   */
  override def onStartup(): Unit = {
    // Initialise SparkConf; local[2] leaves one core for receiving and one for processing.
    val conf = new SparkConf().setMaster("local[2]").setAppName("RealtimeTest")
//    val ssc = new StreamingContext(conf, Minutes(1))
    val ssc = new StreamingContext(conf, Seconds(10))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.37.161:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      // NOTE: 1. As long as group.id stays the same, every restart resumes from the last
      //          committed offset, regardless of what "auto.offset.reset" is set to.
      //       2. A brand-new group.id starts from the latest offset by default (equivalent
      //          to ("auto.offset.reset" -> "latest")); to consume from the earliest
      //          offset, ("auto.offset.reset" -> "earliest") must be set explicitly.
      // This differs from resetting offsets externally with the command:
      //   kafka-consumer-groups --bootstrap-server master:9092 --execute --reset-offsets --to-latest --topic lzz-test --group lzz-group
      // and then re-consuming.
      "group.id" -> "lzz-group",
//      "auto.offset.reset" -> "earliest",
      // WARNING(review): auto-commit may commit offsets before a batch is fully
      // processed, risking data loss on failure (at-most-once). Acceptable only
      // because this is a throwaway test job; real jobs should commit offsets
      // manually via HasOffsetRanges / commitAsync.
      "enable.auto.commit" -> (true: java.lang.Boolean)
    )
    val topics = Array("realtime-test")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )
    stream.foreachRDD { rdd =>
//      val offsetRange = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
//      val maped: RDD[(String, String)] = rdd.map(record => (record.key,record.value))
//      val lines = maped.map(_._2)
//      val words = lines.flatMap(_.split(" "))
//      val pair = words.map(x => (x,1))
//      val wordCounts = pair.reduceByKey(_+_)
//      wordCounts.foreach(println)

      // Sample output:
//      key: null value: 2023032910491|2|20230329104912 offset: 189 partition: 0 timestamp: 1680058152502 timestampType: CreateTime headers: RecordHeaders(headers = [], isReadOnly = false)
//      key: null value: 2023032910491|0|20230329104915 offset: 190 partition: 0 timestamp: 1680058155510 timestampType: CreateTime headers: RecordHeaders(headers = [], isReadOnly = false)
//      key: null value: 2023032910491|1|20230329104918 offset: 191 partition: 0 timestamp: 1680058158518 timestampType: CreateTime headers: RecordHeaders(headers = [], isReadOnly = false)
      // This runs on the executors; output goes to the executor logs (here: the
      // local console, since master is local[2]).
      rdd.foreach { record =>
        println(s"key: ${record.key} value: ${record.value} offset: ${record.offset()}" +
          s" partition: ${record.partition()} timestamp: ${record.timestamp()}" +
          s" timestampType: ${record.timestampType()} headers: ${record.headers()}")
      }
    }
    // start() / awaitTermination() are side-effecting, so keep the parentheses.
    ssc.start()
    ssc.awaitTermination()
  }
}