package com.atguigu.stream.test

import com.atguigu.stream.util.MySparkStreamContextUtil
import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils

/**
 * description : Kafka source test — at-least-once consumption semantics
 * author      : 剧情再美终是戏
 * mail        : 13286520398@163.com
 * date        : Created in 2020/1/15 09:25
 * modified By :
 * version     : 1.0
 */
// NOTE: "at-least-once" here — offsets are not committed to Kafka; instead the
// StreamingContext (including consumed offsets) is checkpointed to a local directory.
// NOTE: On restart, StreamingContext.getActiveOrCreate reads the checkpointed offsets
// from that directory and resumes consumption from where it left off.
object KafkaSourceStreamLessOne {

  /**
   * Checkpoint directory, used both for recovering an existing context (and its
   * stored offsets) and as the target of `ssc.checkpoint`. Kept in one constant so
   * the recovery path and the write path can never silently diverge.
   */
  private val CheckpointDir = "./kafkaSource01"

  /**
   * Entry point. Recovers the StreamingContext from the checkpoint directory when
   * one exists (resuming from the checkpointed offsets), otherwise creates a fresh
   * context via [[consumerRecord]], then starts it and blocks the driver.
   *
   * @param args command-line arguments, forwarded to [[consumerRecord]]
   */
  def main(args: Array[String]): Unit = {

    val ssc: StreamingContext = StreamingContext.getActiveOrCreate(CheckpointDir, () => consumerRecord(args))

    // Start the streaming computation
    ssc.start()

    // Block the driver thread until the streaming job terminates
    ssc.awaitTermination()
  }

  /**
   * Builds a StreamingContext that consumes from Kafka with a direct stream and
   * runs a word count over the message values, printing each batch's result.
   *
   * @param args main-method arguments (passed through to the context utility)
   * @return the configured, not-yet-started StreamingContext
   */
  def consumerRecord(args: Array[String]): StreamingContext = {
    // Obtain a StreamingContext from the project utility
    // NOTE(review): assumes MySparkStreamContextUtil.get derives batch interval etc. from args — confirm
    val ssc: StreamingContext = MySparkStreamContextUtil.get(args)

    // Checkpoint so the consumed offsets survive a driver restart
    ssc.checkpoint(CheckpointDir)

    // Kafka consumer configuration for the direct stream
    val kafkaParams = Map[String, String](
      ConsumerConfig.GROUP_ID_CONFIG -> "0832",
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop101:9092,hadoop102:9092,hadoop103:9092"
    )

    // Direct stream over the "sparkstream" topic; elements are (key, value) pairs
    val source: InputDStream[(String, String)] =
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Set[String]("sparkstream"))

    // Word count over the message values (keys are ignored)
    val result: DStream[(String, Int)] = source.flatMap(_._2.split(" ")).map((_, 1)).reduceByKey(_ + _)

    // Print each batch's counts to stdout
    result.print()

    ssc
  }

}
