package sparkStreaming

import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}



object KafkaDriver {

  /**
   * Entry point: consumes the "spark" Kafka topic through the legacy
   * receiver-based API and prints a per-batch word count every 5 seconds.
   *
   * NOTE(review): uses the ZooKeeper-backed `KafkaUtils.createStream`
   * (spark-streaming-kafka 0.8 receiver API), so offsets are tracked in
   * ZooKeeper under consumer group "spark_group".
   */
  def main(args: Array[String]): Unit = {
    // Streaming context with a 5-second batch interval, running locally
    // on all available cores.
    val conf: SparkConf =
      new SparkConf().setMaster("local[*]").setAppName("KafkaSparkStreaming")
    val streamingContext = new StreamingContext(conf, Seconds(5))

    // Receiver-based Kafka stream: ZooKeeper quorum, consumer group,
    // and topic "spark" read with 3 receiver threads. Elements are
    // (key, message) pairs.
    val kafkaStream: ReceiverInputDStream[(String, String)] =
      KafkaUtils.createStream(
        streamingContext,
        "nn1.hadoop:2181,nn2.hadoop:2181",
        "spark_group",
        Map("spark" -> 3)
      )

    // Per batch: split each message value on spaces, count occurrences
    // of each word, and print the results on the driver.
    kafkaStream.foreachRDD { rdd =>
      val counts: RDD[(String, Int)] = rdd
        .flatMap { case (_, message) => message.split(" ") }
        .map(word => (word, 1))
        .reduceByKey(_ + _)
      counts.collect().foreach(println)
    }

    // Start the streaming job and block until it is terminated.
    streamingContext.start()
    streamingContext.awaitTermination()
  }
}
