package com.sunzm.spark.streaming.listener

import org.apache.commons.lang3.time.FastDateFormat
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, OffsetRange}
import org.apache.spark.streaming.scheduler.{OutputOperationInfo, StreamInputInfo, StreamingListener, StreamingListenerBatchCompleted}

import scala.util.control.NonFatal

/**
 *
 * 用来保存kafka 偏移量的 StreamingListener
 * kafka自身维护偏移量的方式
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-06-18 14:31
 */
class kafkaStreamingListener(kafkaDStream: InputDStream[ConsumerRecord[String, String]]) extends StreamingListener {

  private val fastDateFormat: FastDateFormat = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss")

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted): Unit = {

    val batchInfo = batchCompleted.batchInfo

    //先判断本批次有没有发生错误
    val outputInfos: Map[Int, OutputOperationInfo] = batchInfo.outputOperationInfos

    val milliseconds = batchInfo.batchTime.milliseconds

    val formatDateStr = fastDateFormat.format(milliseconds)

    //不能使用  kafkaDStream.foreachRDD 提交偏移量，否则会提示： Adding new inputs, transformations, and output operations after starting a context is not supported

    var hasError = false

    outputInfos.foreach {
      case (_, outputInfo) => {
        val failureReasonOption: Option[String] = outputInfo.failureReason
        if (failureReasonOption.nonEmpty) {
          hasError = true
        }
      }
    }

    if (!hasError) {
      //没有发生错误，再提交偏移量

      val inputInfo: Map[Int, StreamInputInfo] = batchInfo.streamIdToInputInfo

      inputInfo.foreach {
        case (_, info) => {
          // 下面的代码可以把metadata中的key和value打印出来
          /* val metadata: Map[String, Any] = info.metadata

           metadata.foreach{
             case (k, v) => println(s"${k} -> ${v}")
           }*/

          if (info.numRecords > 0) {

            val offsetOption: Option[Any] = info.metadata.get("offsets")

            if (offsetOption.nonEmpty) {
              try {
                val offset: Any = offsetOption.get

                val offsetRanges: Array[OffsetRange] = offset.asInstanceOf[List[OffsetRange]].toArray

                println(s"[${formatDateStr}]: onBatchCompleted, 更新偏移量...")

                kafkaDStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)

              } catch {
                case e: Exception => println(e)
              }
            }

          }

        }
      }

    }
  }
}
