package newStreaming

import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

object kafkaStreaming {

  /**
   * Entry point. Defined as an explicit `main` instead of `extends App`:
   * the Spark docs warn that subclasses of `scala.App` may not work
   * correctly, because `App`'s delayed initialization can leave fields
   * uninitialized when closures are serialized to executors.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    sparkConf.setAppName("kafkaStreaming")
    // Local mode with 4 threads; override the master via spark-submit in production.
    sparkConf.setMaster("local[4]")
    val sparkContext = new SparkContext(sparkConf)
    // 30-second micro-batch interval.
    val ssc = new StreamingContext(sparkContext, Durations.seconds(30))

    /**
     * AUTO_OFFSET_RESET_CONFIG accepts two values in the old (0.8) consumer API:
     *   "smallest" — equivalent to --from-beginning
     *   "largest"  — consume only the newest data
     */
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "master:9092,node1:9092,node2:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "ssafasdfasfkljadsf",
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "smallest"
    )

    /**
     * Direct mode: Spark pulls data from Kafka itself (no receiver).
     *
     * Offsets are advanced only after each batch finishes computing, and can
     * be checkpointed (e.g. to HDFS).
     *
     * The DStream's partition count equals the topic's partition count.
     */
    val ds = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc,
      kafkaParams,
      Set("fonsview")
    ).map(_._2) // keep only the message value, drop the key
    //  ds.print(10)
    ds.foreachRDD(rdd => {
      println(rdd.count())
    })

    // Receiver mode (kept for reference):
    //  val topics = Map("fonsview" -> 2) // 2 = receive parallelism

    //  val ds2: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(
    //    ssc, // streaming context
    //    "master:2181,node1:2181,node2:2181", // ZooKeeper quorum
    //    "receive1", // consumer group
    //    topics // topic -> thread count
    //  )
    //  ds2.map(_._2).print()

    ssc.start()
    // Blocks until the context is stopped externally (or an error occurs);
    // the previous trailing ssc.stop() after this call was dead code.
    ssc.awaitTermination()
  }
}
