package com.bd03.streaminglearn.day0330

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

object TestFileDemo {

  /**
   * Demo: streaming word count over text files that appear in a monitored
   * directory, computed two ways — once with DStream operators directly,
   * and once with `transform` (an RDD-to-RDD function applied per batch).
   *
   * Batch interval is 2 seconds; runs until externally terminated.
   *
   * @param args optional first argument overrides the monitored directory
   *             (defaults to "d://data/sparktest/").
   */
  def main(args: Array[String]): Unit = {
    // Quiet Spark's verbose INFO logging so the batch output stays readable.
    Logger.getLogger("org").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
    val ssc = new StreamingContext(conf, Seconds(2))

    // Directory to monitor for newly created text files; generalized so the
    // path can be supplied on the command line while keeping the old default.
    val inputDir = args.headOption.getOrElse("d://data/sparktest/")
    val data = ssc.textFileStream(inputDir)

    // Variant 1: word count using DStream-level operators directly.
    data.flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()

    // Variant 2: the same word count via transform, which applies an
    // RDD-to-RDD function to each batch RDD and yields a new DStream.
    val value: DStream[(String, Int)] = data.transform { rdd =>
      rdd.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    }
    // FIX: the original never registered an output operation on `value`,
    // so the transform lineage was never executed. print() makes it run.
    value.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
