package com.zhao.kafka.mysql

import java.lang

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}

import scala.collection.mutable

/**
 * Description: 业务处理类<br/>
 * Copyright (c) ，2020 ， 赵 <br/>
 * This program is protected by copyright laws. <br/>
 * Date： 2020/12/25 14:52
 * 该对象是业务处理逻辑，主要是消费Kafka数据，再处理之后进行手动将偏移量保存到MySQL中。在启动程序时，
 * 会判断外部存储设备中是否存在偏移量，如果是首次启动则从最初的消费位点消费，如果存在Offset，则从当前的Offset去消费。
 *
 * @author 柒柒
 * @version : 1.0
 */

object ManualCommitOffset {

  /**
   * Entry point: consumes Kafka records with a direct stream, maintains a
   * running per-key sum via `updateStateByKey`, and manually persists the
   * consumed offsets to MySQL after each batch. On startup, offsets
   * previously saved in MySQL (if any) are used as the starting position;
   * otherwise consumption starts from the earliest available offset.
   */
  def main(args: Array[String]): Unit = {

    val brokers = ConfigConstants.kafkaBrokers
    val groupId = ConfigConstants.groupId
    val topics = ConfigConstants.kafkaTopic
    val batchInterval = ConfigConstants.batchInterval

    val conf: SparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.serializer", ConfigConstants.sparkSerializer)

    val ssc: StreamingContext = new StreamingContext(conf, batchInterval)
    // Checkpointing is mandatory for updateStateByKey; the job fails without it.
    ssc.checkpoint(ConfigConstants.checkpointDir)

    ssc.sparkContext.setLogLevel("OFF")

    // Topic list is space-separated in the config value.
    val topicSet: Set[String] = topics.split(" ").toSet

    // Kafka consumer configuration. Auto-commit is disabled because offsets
    // are committed manually to MySQL after each batch has been processed.
    val kafkaParams: Map[String, Object] = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> brokers,
      ConsumerConfig.GROUP_ID_CONFIG -> groupId,
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> (false: lang.Boolean),
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest"
    )

    // Read the partition offsets stored in MySQL for this consumer group /
    // topic, if any were saved by a previous run.
    val offsetMap: mutable.Map[TopicPartition, Long] = OffsetReadAndSave.getOffsetMap(groupId, topics)

    // Resume from the stored offsets when present; otherwise fall back to the
    // position dictated by auto.offset.reset ("earliest").
    val inputDStream: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap.nonEmpty) {
        println("存在偏移量,从该偏移量处开始消费!!!")
        KafkaUtils.createDirectStream[String, String](
          ssc,
          LocationStrategies.PreferConsistent,
          ConsumerStrategies.Subscribe[String, String](topicSet, kafkaParams, offsetMap)
        )
      } else {
        KafkaUtils.createDirectStream[String, String](
          ssc,
          LocationStrategies.PreferConsistent,
          ConsumerStrategies.Subscribe[String, String](topicSet, kafkaParams)
        )
      }

    // Checkpoint interval must be an integer multiple of batchInterval.
    inputDStream.checkpoint(ConfigConstants.checkpointInterval)

    // Driver-side holder for the offset ranges of the batch currently being
    // processed.
    // BUG FIX: the original code declared a fresh local `val offsetRanges`
    // inside transform, shadowing an outer empty array, so saveOffsetRanges
    // below always persisted an empty set and offsets were never actually
    // saved to MySQL. It must be a var that transform ASSIGNS (the transform
    // closure body runs on the driver at batch-generation time, so the
    // assignment is visible to the foreachRDD of the same batch).
    var offsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]

    // Capture each batch's offset ranges before any downstream transformation
    // (HasOffsetRanges is only available on the unmodified Kafka RDD).
    val transformDS: DStream[ConsumerRecord[String, String]] = inputDStream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    /**
     * State update function for updateStateByKey: adds the values observed in
     * the current batch to the previously accumulated total for the key.
     *
     * @param newValues  values seen for the key in the current batch
     * @param stateValue previously accumulated total, if any
     * @return the updated running total
     */
    def updateFunc(newValues: Seq[Int], stateValue: Option[Int]): Option[Int] =
      Option(stateValue.getOrElse(0) + newValues.sum)

    // Business logic: running sum of message values under a constant key —
    // useful for checking that consumption resumes from the saved offsets.
    transformDS.map(msg => ("spark", msg.value().toInt)).updateStateByKey(updateFunc).print()

    // Print records and offset info for inspection, then persist the offsets.
    transformDS.foreachRDD { (rdd, time) =>
      // Dump every record of this batch (executor-side println; visible in
      // executor logs, or the driver console in local mode).
      rdd.foreach { record =>
        println(s"key=${record.key()},value=${record.value()},partition=${record.partition()},offset=${record.offset()}")
      }
      // Print the consumed offset ranges for this batch.
      for (o <- offsetRanges) {
        println(s"topic=${o.topic},partition=${o.partition},fromOffset=${o.fromOffset},untilOffset=${o.untilOffset},time=${time}")
      }
      // Persist offsets to MySQL only after the batch's records have been
      // processed (at-least-once semantics).
      OffsetReadAndSave.saveOffsetRanges(groupId, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}












