package com.spark.mooc.ch7_sparkstreaming.part03_kafkaDataSource

import com.spark.mooc.ch7_sparkstreaming.part02_basicInputSource.socket.StreamingExamples
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}


/**
 * @description:
 * @time: 2020/11/29 16:59
 * @author: lhy
 */
/**
 * Consumes words from a Kafka topic (via the legacy receiver-based 0.8 API)
 * and prints a sliding-window word count every batch.
 *
 * Expected to be launched with spark-submit (no setMaster here, so the
 * master comes from the submit command / cluster config).
 */
object KafkaWordConsumer {
    def main(args: Array[String]): Unit = {
        StreamingExamples.setStreamingLogLevels()       // reduce log noise so streaming output is readable
        val conf: SparkConf = new SparkConf().setAppName("KafkaWordConsumer")
        val ssc = new StreamingContext(conf, Seconds(10))   // 10-second batch interval
        /*
         Checkpointing is REQUIRED here: reduceByKeyAndWindow with an
         inverse-reduce function (used below) keeps window state and Spark
         refuses to start without a checkpoint directory, failing with
         "requirement failed: The checkpoint directory has not been set".
         Use an HDFS path in production; a local path is fine for testing.
         */
        ssc.checkpoint("output/dataFrame/checkpoint")
        val zkQuorum = "node02:2181,node03:2181,node04:2181"    // ZooKeeper quorum for the Kafka 0.8 receiver
        val group = "1"     // Kafka consumer group id (any name)
        val topics = "topic001-wordsender"       // topic name(s); separate multiple topics with ","
        val numThreads = 3      // receiver threads per topic
        // Map each topic name to its receiver thread count (numThreads is already an Int).
        val topicMap: Map[String, Int] = topics.split(",").map((_, numThreads)).toMap
        val lineMap: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(ssc, zkQuorum, group, topicMap)
        val lines: DStream[String] = lineMap.map(_._2)      // drop the Kafka message key, keep the value
        val words: DStream[String] = lines.flatMap(_.split(" "))
        val pair: DStream[(String, Int)] = words.map((_, 1))
        // Windowed word count: 2-minute window sliding every 10 seconds, 2 result
        // partitions. The inverse function (_ - _) incrementally subtracts counts
        // that fall out of the window instead of recomputing the whole window.
        val wordCounts: DStream[(String, Int)] = pair.reduceByKeyAndWindow(_ + _, _ - _, Minutes(2), Seconds(10), 2)
        wordCounts.print()
        ssc.start()
        ssc.awaitTermination()
    }
}
