package com.itcast.spark.kafka

import java.lang

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DESC: Spark Streaming consuming Kafka data with the 0-10 integration, using the
 * low-level (direct) API, i.e. offsets are managed on the Kafka side.
 * 1 - Set up the StreamingContext with a batch interval, here 5 seconds
 * 2 - Read the Kafka topic via KafkaUtils.createDirectStream, supplying the required configuration
 * 3 - Implement the Kafka + Spark Streaming WordCount (note how the Kafka record structure is parsed)
 * 4 - Print the result
 * 5 - ssc.start
 * 6 - ssc.awaitTermination
 * 7 - ssc.stop(true, true)
 */
object _011KafkaSparkStreaming010BaseCheckPoint {

  /**
   * State-update function for `updateStateByKey`: adds the counts observed for a
   * key in the current batch to the running total recovered from state.
   *
   * @param currentValue counts for one key arriving in the current batch
   * @param historyValue accumulated count from previous batches, if any
   * @return the new accumulated count (always `Some`, so the key's state is retained)
   */
  def updateFunc(currentValue: Seq[Int], historyValue: Option[Int]): Option[Int] =
    Some(currentValue.sum + historyValue.getOrElse(0))

  // Checkpoint directory; both streaming metadata and the word-count state live here.
  val CHDIR = "./datasets/checkpoint/ck18"

  def main(args: Array[String]): Unit = {
    // Recover the StreamingContext from the checkpoint directory if one exists,
    // otherwise build a fresh one with the creating function below.
    val ssc: StreamingContext =
      StreamingContext.getActiveOrCreate(CHDIR, () => {
        // 1 - prepare the environment: StreamingContext with a 5-second batch interval
        val conf: SparkConf = new SparkConf()
          .setAppName("_01KafkaSparkStreaming010Base")
          .setMaster("local[*]")
        val sc = new SparkContext(conf)
        sc.setLogLevel("WARN")
        // renamed from `ssc` to avoid shadowing the outer value of the same name
        val newSsc = new StreamingContext(sc, Seconds(5))
        newSsc.checkpoint(CHDIR)
        // The whole DStream graph must be defined inside the creating function,
        // otherwise it cannot be restored from the checkpoint on recovery.
        processData(newSsc)
        newSsc
      })
    // 5 - start the computation
    ssc.start()
    // 6 - block until the job is stopped or fails
    ssc.awaitTermination()
    // 7 - stop both the StreamingContext and the underlying SparkContext, gracefully
    ssc.stop(stopSparkContext = true, stopGracefully = true)
  }

  /**
   * Builds the DStream graph: reads from Kafka with the 0-10 direct-stream API
   * and runs a stateful word count over the record values.
   *
   * @param ssc the StreamingContext the graph is registered on
   */
  private def processData(ssc: StreamingContext): Unit = {
    // 2 - consumer configuration for KafkaUtils.createDirectStream
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "node01:9092,node02:9092,node03:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "each_stream",
      // start from the latest offsets when no committed offset exists
      "auto.offset.reset" -> "latest",
      // disable Kafka auto-commit; offsets are recovered via the Spark checkpoint
      "enable.auto.commit" -> (false: lang.Boolean)
    )

    /*ssc: StreamingContext,
    locationStrategy: LocationStrategy,
    consumerStrategy: ConsumerStrategy[K, V]*/
    val receiveData: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Array("kafkatopic"), kafkaParams)
      )
    // 3 - WordCount over the Kafka record values, keeping state across batches
    val result: DStream[(String, Int)] = receiveData
      .map(_.value())
      .flatMap(_.split("\\s+"))
      .map(word => (word, 1))
      .updateStateByKey(updateFunc)
    // 4 - print the running totals each batch
    result.print()
  }
}
