package com.offcn.bigdata.spark.streaming.p1

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Kafka integration using the default Direct approach.
  *
  * spark.streaming.kafka.maxRatePerPartition: the maximum number of records
  * read per second from each partition. For example, with batchInterval = 2s,
  * partitionNum = 3, and this parameter set to 100, each batch reads at most
  * 2 * 3 * 100 = 600 records from Kafka.
  *
  * LocationStrategy:
  *    PreferBrokers: use when the Kafka brokers and the executors run on the
  *        same machines.
  *        Here the broker represents the data,
  *        and the executor represents the computation.
  *    PreferConsistent: the common choice; distributes partitions evenly
  *        across all executors.
  *    PreferFixed: use when Kafka brokers are unevenly distributed; provides a
  *        fixed partition-to-host mapping, and partitions not in the mapping
  *        fall back to PreferConsistent.
  * topic helper commands:
  *    bin/kafka-console-consumer.sh --topic spark --bootstrap-server bigdata01:9092,bigdata02:9092,bigdata03:9092 --from-beginning
  *    bin/kafka-console-producer.sh --topic spark --broker-list bigdata01:9092
  *    bin/kafka-topics.sh --list --zookeeper bigdata01:2181/kafka
  */
object _04StreamingWithKafkaDirectApp {
    def main(args: Array[String]): Unit = {
        // Cap the per-partition ingest rate so a single batch cannot pull an
        // unbounded backlog from Kafka (see math in the Scaladoc above).
        val conf = new SparkConf().setMaster("local").setAppName("StreamingWithKafkaDirect")
                .set("spark.streaming.kafka.maxRatePerPartition", "10")
        val ssc = new StreamingContext(conf, Seconds(2))

        val topics = "spark".split(",")
        val kafkaparams = Map[String, Object](
            "bootstrap.servers" -> "bigdata01:9092,bigdata02:9092,bigdata03:9092",
            "key.deserializer" -> classOf[StringDeserializer].getName,
            "value.deserializer" -> classOf[StringDeserializer].getName,
            "group.id" -> "spark-kafka-group-1",
            // Only consulted when a partition has no committed and no explicit
            // starting offset.
            "auto.offset.reset" -> "earliest",
            // FIX: was "true". Auto-commit writes offsets on a timer *before*
            // the batch is actually processed, which breaks at-least-once
            // semantics and defeats the explicit `offsets` map below after a
            // restart. The Spark + Kafka 0.10 integration guide recommends
            // disabling auto-commit for the direct stream and committing
            // manually (e.g. via commitAsync) after processing if needed.
            "enable.auto.commit" -> (false: java.lang.Boolean)
        )
        // Start consuming each of the three partitions of topic "spark" at
        // offset 16.
        val offsets = Map[TopicPartition, Long](
            new TopicPartition("spark", 0) -> 16L,
            new TopicPartition("spark", 1) -> 16L,
            new TopicPartition("spark", 2) -> 16L
        )
        // Load Kafka data starting from the specified offsets.
        val messages: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc,
                LocationStrategies.PreferConsistent,
                ConsumerStrategies.Subscribe(topics, kafkaparams, offsets)
        )

        messages.foreachRDD((rdd, bTime) => {
            // Skip empty batches so we don't print headers with no data.
            if(!rdd.isEmpty()) {
                println("-------------------------------------------")
                println(s"Time: $bTime")
                println("-------------------------------------------")
                rdd.foreach(record => {
                    println(record)
                })
            }
        })

        ssc.start()
        ssc.awaitTermination()
    }
}
