package cn.itcast.dstream

import kafka.serializer.StringDecoder
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object SparkStreaming_Kafka_createDirectStream {
    /**
     * Streaming word count that consumes a Kafka topic through the low-level
     * direct (receiver-less) API and prints per-batch word counts every 5 seconds.
     */
    def main(args: Array[String]): Unit = {
        // Build the Spark configuration for a local, 2-thread run.
        val conf: SparkConf = new SparkConf()
                .setMaster("local[2]")
                .setAppName("SparkStreaming_Kafka_createDirectStream")
        // Create the core context and keep console logging quiet.
        val sparkContext = new SparkContext(conf)
        sparkContext.setLogLevel("WARN")
        // Streaming context with a micro-batch interval of five seconds.
        val streamingContext = new StreamingContext(sparkContext, Seconds(5))
        // Directory used by Spark Streaming to checkpoint metadata.
        streamingContext.checkpoint("./Kafka_Direct")
        // Kafka connection settings ("metadata.broker.list" is the legacy
        // broker-list key used by the old 0.8-style consumer).
        val kafkaParams = Map(
            "metadata.broker.list" -> "hadoop01:9092,hadoop02:9092,hadoop03:9092",
            "group.id" -> "spark_direct")
        // Topic(s) to subscribe to.
        val topics = Set("kafka_direct0")
        // Wire Kafka into Spark Streaming via the direct-stream integration;
        // each record arrives as a (key, value) pair of strings.
        val messages: InputDStream[(String, String)] =
            KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
                streamingContext, kafkaParams, topics)
        // Keep only the message payload (the value half of each pair).
        val lines: DStream[String] = messages.map(_._2)
        // Tokenize on spaces and pair every word with an initial count of 1.
        val wordPairs: DStream[(String, Int)] = lines.flatMap(_.split(" ")).map((_, 1))
        // Sum the counts per word within each batch.
        val wordCounts: DStream[(String, Int)] = wordPairs.reduceByKey(_ + _)
        // Emit each batch's counts to stdout.
        wordCounts.print()
        // Launch the streaming job and block until it is terminated.
        streamingContext.start()
        streamingContext.awaitTermination()
    }
}
