package com.cl.process

import com.cl.util.OffsetUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.log4j.{Level, Logger}

import scala.collection.mutable

/**
 * @Author xc
 * @Date 2023/4/15 12:43
 * @Version 1.0
 *         Sewage-discharge data: Spark Streaming consumer with offsets manually persisted to MySQL.
 */
//NOTE: currently unused
object Sewage_Data_Process {
  /**
   * Entry point: consumes sewage-discharge records from the Kafka topic
   * `sewageDischarge`, prints each record's value, and manually manages
   * consumer offsets by persisting them to MySQL via [[OffsetUtils]]
   * (at-least-once semantics: offsets are saved after each batch is seen).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging so application output stays readable.
    Logger.getLogger("org").setLevel(Level.ERROR)
    Logger.getLogger("akka").setLevel(Level.ERROR)

    // 1. Spark Streaming environment: local mode, 5-second micro-batches.
    val conf: SparkConf = new SparkConf().setAppName("Sewage_Data_Process").setMaster("local[*]")
    val context: SparkContext = new SparkContext(conf)
    context.setLogLevel("WARN")
    val ssc: StreamingContext = new StreamingContext(context, Seconds(5)) // batch interval
    ssc.checkpoint("./ssckp")

    // 2. Kafka connection parameters.
    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "192.168.32.129:9092",
      "group.id" -> "SparkKafka",
      // auto.offset.reset applies only when no stored offset exists:
      //   latest   -> start from the newest/end position
      //   earliest -> start from the oldest/beginning position
      //   none     -> raise an error
      "auto.offset.reset" -> "latest",
      // Offsets are committed manually (saved to MySQL below), so auto-commit is off.
      "enable.auto.commit" -> (false: java.lang.Boolean),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer]
    )
    val topics: Array[String] = Array("sewageDischarge")

    // Load previously saved offsets from MySQL: Map[TopicPartition, Long].
    val offsetsMap: mutable.Map[TopicPartition, Long] = OffsetUtils.getOffsetMap("SparkKafka", "sewageDischarge")
    // 3. Create the direct stream, resuming from stored offsets when available.
    val kafkaDS: InputDStream[ConsumerRecord[String, String]] = if (offsetsMap.nonEmpty) {
      println("MySql记录了offset信息,从offset处开始消费")
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsetsMap)
      )
    } else {
      println("MySql没有记录了offset信息,从latest处开始消费")
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
      )
    }

    // 4. Process the stream: extract each record's value and print it.
    val valueDS: DStream[String] = kafkaDS.map(_.value())
    valueDS.print()

    // 5. Manually persist offsets after each non-empty batch.
    kafkaDS.foreachRDD { rdd =>
      // isEmpty only looks at the first element, whereas count() launches a
      // full job over every partition just to test emptiness.
      if (!rdd.isEmpty()) {
        // The offset ranges consumed in this batch (only available on the
        // direct stream's RDDs, hence the HasOffsetRanges cast).
        val offsets: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Persist to MySQL instead of Kafka's __consumer_offsets topic;
        // kafkaDS.asInstanceOf[CanCommitOffsets].commitAsync(offsets) would
        // commit to Kafka (and to the checkpoint when enabled) instead.
        OffsetUtils.saveOffsets(groupId = "SparkKafka", offsets)
      }
    }

    // 6. Start the streaming job and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }

}
