package com.kgc.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Processes files landing on HDFS with Spark Streaming.
  */
object HdfsWordCount {

  /**
    * Entry point: performs a streaming word count over text files that appear
    * in the monitored directory.
    *
    * Fix: the original wrapped the whole body in a second, nested
    * `def main(args: Array[String])` — the inner method was only defined and
    * never called, so the program started and exited without doing anything.
    * The nested definition is removed so the body actually runs.
    *
    * @param args exactly one argument: the directory to monitor for new files
    */
  def main(args: Array[String]): Unit = {
    // Validate the single required argument (the input directory path).
    if (args.length != 1) {
      System.err.println("Usage: HdfsWordCount <directory>")
      System.exit(1)
    }

    // local[2]: at least two threads so batch processing isn't starved.
    val sparkConf = new SparkConf().setAppName("HdfsWordCount").setMaster("local[2]")

    // StreamingContext with a 2-second batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(2))

    // File-based input DStream: picks up files newly created under args(0)
    // and reads them as lines of text.
    val lines = ssc.textFileStream(args(0))

    // Tokenize each line on single spaces.
    val words = lines.flatMap(_.split(" "))

    // RDD-style word count: pair each word with 1, then sum counts per key.
    val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
    wordCounts.print()

    // Start the streaming job and block until it is terminated.
    ssc.start()
    ssc.awaitTermination()
  }
}
