package org.hadoop.spark
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
/**
 * Spark Streaming word count over a watched local directory.
 *
 * Monitors `file:///home/hadoop/1` for newly added files; each new file's
 * contents are tokenized on whitespace and per-batch word counts are printed.
 * Runs for at most 20 seconds, then shuts the streaming context down.
 */
object FileStreaming {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[2]") // streaming needs at least 2 threads: 1 receiver + 1 processing
      .setAppName("FileStream")
    // Streaming context with a 2-second batch interval
    val ssc = new StreamingContext(conf, Seconds(2))
    ssc.sparkContext.setLogLevel("WARN")
    // Watch the directory; any file newly placed there is read and processed
    val lines: DStream[String] = ssc.textFileStream("file:///home/hadoop/1")
    lines.flatMap(_.split("\\s+")).map((_, 1)).reduceByKey(_ + _).print()
    ssc.start()
    // BUGFIX: the original called awaitTermination() (blocks forever) and then
    // had an unreachable sleep/stop. awaitTerminationOrTimeout returns after
    // 20s so the context can actually be stopped.
    ssc.awaitTerminationOrTimeout(1000 * 20)
    // Stop the streaming context and the underlying SparkContext, finishing
    // any in-flight batch gracefully.
    ssc.stop(stopSparkContext = true, stopGracefully = true)
  }
}
