package org.hadoop.spark
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
/** Streaming word count over files appearing in a monitored directory.
  *
  * Usage: FIleStreaming2 <input-directory>
  *
  * Watches the given directory with a 2-second micro-batch interval,
  * tokenizes new files on whitespace, and prints per-batch word counts.
  * Runs for ~20 seconds, then shuts the streaming context down.
  */
object FIleStreaming2 {
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("usage : <in>")
      // Exit with a non-zero status on a usage error instead of a bare
      // `return` (which is both non-idiomatic Scala and reports success).
      sys.exit(1)
    }

    val conf = new SparkConf()
      .setAppName("FileStream")
    // Streaming context with a 2-second batch interval.
    val ssc = new StreamingContext(conf, Seconds(2))

    // Monitor the input directory for new files only, skipping files that
    // are still being copied in (HDFS names in-flight copies "*_COPYING_*").
    val lines: DStream[(LongWritable, Text)] =
      ssc.fileStream[LongWritable, Text, TextInputFormat](
        directory = args(0),
        filter = (path: Path) => !path.getName.contains("_COPYING_"),
        newFilesOnly = true)

    lines
      .map(_._2.toString)       // keep only the line text; drop the byte-offset key
      .flatMap(_.split("\\s+")) // tokenize on runs of whitespace
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()

    ssc.start()
    // BUG FIX: the original called awaitTermination(), which blocks until the
    // context is stopped externally — the Thread.sleep and ssc.stop() that
    // followed were unreachable dead code. A bounded wait realizes the evident
    // intent: run for ~20 seconds, then stop cleanly.
    ssc.awaitTerminationOrTimeout(1000 * 20)
    ssc.stop()
  }
}
