package cn.jly.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * 通过sparkStreaming从kafka中读取数据，并做wordCount
 *
 * @author lanyangji
 * @date 2019/12/5 15:08
 */
object SparkStreaming05_Kafka {

  def main(args: Array[String]): Unit = {

    // Spark configuration: run locally using all available cores.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("SparkStreaming05_Kafka")

    // Streaming context with a 4-second micro-batch interval.
    val ssc = new StreamingContext(conf, Seconds(4))

    // 1. Build a receiver-based Kafka DStream through ZooKeeper:
    //    topic "spark_streaming_source" consumed with 3 receiver threads
    //    under consumer group "spark".
    val kafkaStream: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(
      ssc,
      "hadoop102:2181",
      "spark",
      Map("spark_streaming_source" -> 3)
    )

    // 2. Word-count each micro-batch.
    //    Kafka records arrive as (key, value) pairs; the key is typically
    //    unset here, so only the value is tokenized.
    kafkaStream.foreachRDD { rdd =>
      val counts = rdd
        .flatMap { case (_, value) => value.split(" ") }
        .map(word => (word, 1))
        .reduceByKey(_ + _)
      // NOTE(review): collect() pulls every batch's result to the driver —
      // fine for this local demo, avoid on large data.
      counts.collect.foreach(println)
    }

    // 3. Start the streaming job and block until it terminates.
    ssc.start()
    ssc.awaitTermination()
  }
}
