package com.shujia.spark.kafka

import kafka.serializer.StringDecoder
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.kafka.KafkaUtils.createStream
import org.apache.spark.streaming.{Duration, Durations, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object Demo3SparkOnKafkaReceiver {

  /**
    * Demo: receiver-based Kafka consumption (old high-level consumer API via ZooKeeper)
    * with a write-ahead log, followed by a per-batch word count that is printed.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("receiver")

    // Enable the receiver write-ahead log (WAL); this requires a checkpoint
    // directory to be configured on the StreamingContext (done below).
    conf.set("spark.streaming.receiver.writeAheadLog.enable", "true")

    val sc = new SparkContext(conf)

    // Streaming context with a 5-second batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    ssc.checkpoint("spark/data/receiver")

    /**
      * Connect to Kafka and create the input DStream.
      */

    // Topics to read, mapped to the number of consumer threads per topic.
    val topics = Map("flume" -> 2)

    /*
       // The returned DStream is key/value pairs; the key is usually unused.
       val kafkaDS: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(
         ssc,
         "node1:2181",
         "asdsadwasd",
         topics,
         StorageLevel.MEMORY_ONLY // persistence level for the received data
       )*/


    // Kafka connection parameters.
    val kafkaParams = Map[String, String](
      "zookeeper.connect" -> "node1:2181",
      "group.id" -> "asdadsadwasd",
      "auto.offset.reset" -> "smallest"
    )

    // auto.offset.reset semantics (old consumer API):
    // smallest - if a partition has a committed offset, resume from it;
    //            otherwise consume from the beginning of the partition
    // largest  - if a partition has a committed offset, resume from it;
    //            otherwise consume only newly produced data
    // none     - resume only if every partition has a committed offset;
    //            throw if any partition lacks one


    // Create the receiver-based DStream.
    // NOTE: with the WAL enabled, use MEMORY_AND_DISK_SER rather than MEMORY_ONLY —
    // the log already provides fault tolerance (no in-memory replication needed),
    // and allowing spill to disk prevents received blocks from being dropped
    // under memory pressure.
    val kafkaDS = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topics, StorageLevel.MEMORY_AND_DISK_SER)


    // Word count over the message values (keys are ignored), comma-delimited.
    kafkaDS
      .map(_._2)
      .flatMap(_.split(","))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()


    ssc.start()
    // Blocks until the context is stopped externally; an explicit stop() after
    // this call is dead code, so none is issued.
    ssc.awaitTermination()
  }
}
