package com.ada.spark.streaming

import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Reads data from Kafka via Spark Streaming, performs a simple word count
  * on each batch, and prints the result to the console.
  */
object SparkStreaming05_KafkaSource {

    def main(args: Array[String]): Unit = {

        // 1. Create the SparkConf and initialize the StreamingContext
        //    with a 5-second batch interval. local[*] is required so the
        //    receiver gets its own core while leaving cores for processing.
        val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming05_KafkaSource")
        val ssc = new StreamingContext(sparkConf, Seconds(5))

        val topic = "source"

        // 2. Create a receiver-based Kafka DStream via KafkaUtils.
        //    - "hadoop121:2181" is the ZooKeeper quorum (old high-level consumer API).
        //    - Map(topic -> 3) requests 3 receiver threads for this topic.
        //    Each record is a (key, value) pair of Strings.
        val kafkaDStream: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(
            ssc,
            "hadoop121:2181",
            topic,
            Map(topic -> 3),
            StorageLevel.MEMORY_ONLY
        )

        // 3. Word count on every batch: split each message value on spaces,
        //    pair each word with 1, and sum the counts per word.
        kafkaDStream.foreachRDD {
            rdd => {
                val word: RDD[String] = rdd.flatMap(_._2.split(" "))
                val wordAndOne: RDD[(String, Int)] = word.map((_, 1))
                val wordAndCount: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
                // NOTE: collect() pulls the whole batch result to the driver —
                // fine for a console demo, unsafe for large batches. For real
                // output prefer wordAndCount.foreachPartition or a sink.
                wordAndCount.collect().foreach(println)
            }
        }

        // 4. Start the streaming job and block until it terminates.
        ssc.start()
        ssc.awaitTermination()
    }

}

