package cn.josewu.process

import cn.josewu.util.OffsetUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}



/**
 * @program: Covid19_WZData_process
 * @description: 疫情物资数据的实时处理与分析
 * @author: Mr. Tigger
 * @create 2020-11-14 09:11
 **/
object Covid19_WZData_process {
  /**
   * Entry point: consumes epidemic-supplies messages from Kafka via the Direct
   * approach, prints each record and its offset ranges, and manually commits
   * the consumed offsets (to Kafka and to external storage via OffsetUtils).
   */
  def main(args: Array[String]): Unit = {
    // 1. Set up the SparkStreaming environment (5-second micro-batches).
    val conf: SparkConf = new SparkConf().setAppName("Covid19_WZData_process").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    sc.setLogLevel("WARN")
    val ssc: StreamingContext = new StreamingContext(sc, Seconds(5))
    ssc.checkpoint("./sscckp")
    // Note: SparkStreaming integrates with Kafka in two ways:
    //  - Receiver mode (KafkaUtils.createDStream): a Receiver runs as a
    //    long-lived Task on an Executor, continuously waiting for data.
    //  - Direct mode (used below): executors read from Kafka directly.

    // 2. Kafka connection parameters.
    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "139.224.46.166:9092,139.224.46.166:9093,139.224.46.166:9094",
      "group.id" -> "SparkKafka",
      "auto.offset.reset" -> "latest",
      // Offsets are committed MANUALLY below (commitAsync + OffsetUtils), so
      // auto-commit must be disabled; leaving it on would let the consumer
      // commit offsets on its own timer, undermining the manual at-least-once
      // offset bookkeeping.
      "enable.auto.commit" -> (false: java.lang.Boolean),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer]
    )
    val topics: Array[String] = Array("covid19_wz")
    // 3. Connect to Kafka and obtain the message stream (Direct mode).
    val kafkaDS: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))
    // 4. Process the data in near-real time.
    kafkaDS.foreachRDD { rdd =>
      // isEmpty() is cheaper than count() > 0: it only needs to find one
      // element instead of scanning every partition.
      if (!rdd.isEmpty()) {
        rdd.foreach(record => println("从Kafka中消费到每一条消息：" + record))
        // Offset ranges are only available on the RDD produced directly by
        // createDirectStream (it implements HasOffsetRanges).
        val offsets: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        for (o <- offsets) {
          println(s"topic=${o.topic},partition=${o.partition},fromOffst=${o.fromOffset},until=${o.untilOffset}")
        }
        // Manually commit offsets to Kafka's internal __consumer_offsets topic
        // (and, since checkpointing is enabled, to the checkpoint as well),
        // then persist them externally via OffsetUtils for restart recovery.
        kafkaDS.asInstanceOf[CanCommitOffsets].commitAsync(offsets)
        OffsetUtils.saveOffsets(groupid = "SparkKafka", offsets)
      }
    }
    // 5. TODO: persist the analysis results to MySQL.
    // 6. Start the streaming job and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }

}
