package com.app.kafka

import org.apache.spark._
import org.apache.spark.streaming.{StreamingContext, _}
/**
 * Spark Streaming driver that guarantees Kafka partition offsets survive
 * driver restarts by recovering the StreamingContext from a checkpoint
 * directory (offsets and DStream lineage are persisted there).
 */
object CheckpointsKafka {

  def main(args: Array[String]): Unit = {

    // Checkpoint directory: offset metadata and the DStream graph are stored here.
    val CKPT_DIR: String = "data/ckpt"

    /**
     * StreamingContext.getActiveOrCreate(
     *   checkpointPath: String,
     *   creatingFunc: () => StreamingContext,
     *   hadoopConf: Configuration = SparkHadoopUtil.get.conf,
     *   createOnError: Boolean = false
     * ): StreamingContext
     *
     * The creating function runs ONLY when no usable checkpoint exists in
     * CKPT_DIR; otherwise the context — including Kafka offsets — is
     * restored from the checkpoint and the function is skipped entirely.
     */
    val ssc: StreamingContext = StreamingContext.getActiveOrCreate(
      CKPT_DIR,
      () => {
        val conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")

        // Backpressure cap: at most this many records consumed per Kafka
        // partition per batch interval, to avoid a huge first batch after restart.
        conf.set("spark.streaming.kafka.maxRatePerPartition", "10000")

        val ssc = new StreamingContext(conf, Seconds(1))

        // Business logic: the full DStream graph must be defined inside the
        // creating function so it can be rebuilt from the checkpoint on recovery.
        Process.processData(ssc)

        ssc.checkpoint(CKPT_DIR)
        ssc
      }
    )

    ssc.start()
    // Blocks until the context is stopped externally or the job fails.
    ssc.awaitTermination()
    // Named booleans for clarity: also stop the underlying SparkContext, and
    // drain in-flight/queued batches before shutting down.
    ssc.stop(stopSparkContext = true, stopGracefully = true)
  }

}
