package com.atguigu.bigdata.spark.streaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

//Before running: create the topic first and start a producer to generate data; this app consumes the received messages.
// NOTE(review): object name keeps the original "Sprak" typo — it is the public
// entry-point name, so renaming would break external references (spark-submit
// class name, build config). Fix it in a coordinated rename, not here.
object SprakStreaming05_Source_Kafka {
  /**
   * Entry point: consumes string messages from the Kafka topic "atguigu201021"
   * with the 0.10 direct-stream API and prints each micro-batch to stdout.
   */
  def main(args: Array[String]): Unit = {
    // local[*]: use all local cores — a receiver-less direct stream still needs
    // at least one core per Kafka partition plus one for the driver work.
    val conf: SparkConf = new SparkConf().setAppName("kafka").setMaster("local[*]")

    // Batch interval of 3 seconds: each micro-batch covers 3s worth of records.
    val ssc = new StreamingContext(conf, Seconds(3))

    // Kafka is a very common real-time source, so Spark ships a dedicated
    // integration helper: KafkaUtils.

    // Consumer parameters. All keys use the type-safe ConsumerConfig constants
    // (the original mixed constants with raw "key.deserializer" string literals;
    // the constants resolve to the exact same strings but are checked at
    // compile time).
    val kafkaPara: Map[String, Object] = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop104:9092,hadoop105:9092,hadoop106:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "atguigu",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
    )

    // Create a direct (receiver-less) DStream over the topic.
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent, // let Spark spread partitions evenly across executors
        ConsumerStrategies.Subscribe[String, String](Set("atguigu201021"), kafkaPara)
      )

    // Kafka records are (key, value) pairs; the key is typically null in this
    // setup, so only the value payload is kept.
    val kafkaData: DStream[String] = kafkaDStream.map(_.value())
    kafkaData.print()

    // Start the streaming job and block until it is terminated externally.
    ssc.start()
    ssc.awaitTermination()
  }

}
