package com.ada.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * File data source example: Spark Streaming can read from any HDFS-API-compatible
  * file system via `textFileStream`. It monitors the given directory and processes
  * files as they are *moved* into it (files must appear atomically; nested
  * directories are not supported).
  *
  * Usage: the first program argument, if present, overrides the monitored
  * directory; otherwise the original default HDFS path is used.
  */
object SparkStreaming02_FileDataSource {

    /** Entry point: builds the streaming word count over files appearing in a directory. */
    def main(args: Array[String]): Unit = {

        // 1. Initialize the Spark configuration.
        //    local[*] uses all available cores; file streams need no dedicated
        //    receiver thread, so even local[1] would work here.
        val sparkConf = new SparkConf().setMaster("local[*]")
            .setAppName("SparkStreaming02_FileDataSource")

        // 2. Create the StreamingContext with a 5-second batch interval.
        val ssc = new StreamingContext(sparkConf, Seconds(5))

        // 3. Monitored directory: taken from args(0) when supplied, otherwise
        //    the original hard-coded HDFS path (backward compatible).
        val monitoredDir = args.headOption.getOrElse("hdfs://hadoop121:9000/fileStream")

        //    Create a DStream of text lines from new files in the directory.
        val dirStream = ssc.textFileStream(monitoredDir)

        // 4. Split each line on single spaces into individual words.
        val wordStreams = dirStream.flatMap(_.split(" "))

        // 5. Map each word to a (word, 1) pair for counting.
        val wordAndOneStreams = wordStreams.map((_, 1))

        // 6. Sum the counts for identical words within each batch.
        val wordAndCountStreams = wordAndOneStreams.reduceByKey(_ + _)

        // 7. Print the first elements of each batch's result to stdout.
        wordAndCountStreams.print()

        // 8. Start the computation and block until it is terminated
        //    (externally, or by an error).
        ssc.start()
        ssc.awaitTermination()

    }

}
