package org.niit.streaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/*
  Spark Streaming acting as a Kafka consumer.
 */
/**
 * Spark Streaming job that consumes records from the Kafka topic "BD2" and
 * prints one "topic, offset, value" line per record for every 3-second batch.
 *
 * NOTE: uses an explicit `main` instead of `extends App`. The `App` trait is
 * implemented via `DelayedInit`, a known Spark pitfall: object fields may still
 * be uninitialized (null) when Spark serializes closures to executors.
 */
object SparkStreaming_Kafka {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("spark")
    // 3-second micro-batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(3))
    ssc.sparkContext.setLogLevel("ERROR")

    // 1. Kafka consumer connection configuration.
    //    "earliest" replays the topic from the beginning when the group ("G2")
    //    has no committed offset yet.
    val kafkaPara = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "node1:9092",
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
      ConsumerConfig.GROUP_ID_CONFIG -> "G2",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer].getName,
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer].getName
    )

    // 2. Create the direct stream that consumes Kafka data (one ConsumerRecord
    //    per message).
    val kafkaDS: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent, // officially recommended location strategy
        ConsumerStrategies.Subscribe[String, String](Set("BD2"), kafkaPara)
      )

    // 3. Map each record to a human-readable line.
    val resDS: DStream[String] = kafkaDS.map { record =>
      val topic: String = record.topic()
      val offset: Long = record.offset()
      val value: String = record.value()
      s"主题：${topic},偏移量：${offset},值：${value}"
    }

    // Print up to 100 lines per batch to the driver's stdout.
    resDS.print(100)

    ssc.start()
    ssc.awaitTermination() // block until the streaming context is stopped
  }
}
