package streaming.demo8

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Spark Streaming demo: counts words received over a socket, aggregated
  * over a sliding window.
  *
  * Reads lines from node01:9999, splits them into words, and every 5 seconds
  * prints the per-word counts accumulated over the last 10 seconds.
  */
object SparkStreamingDemo1 {
  def main(args: Array[String]): Unit = {
    // Use several local threads: one thread is permanently occupied by the
    // socket receiver, so with only one thread the batches would never run.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("SparkStreamingDemo1")
    val sc: SparkContext = new SparkContext(sparkConf)
    sc.setLogLevel("WARN")
    // StreamingContext takes the SparkContext and the batch interval:
    // a new micro-batch is produced every 5 seconds.
    val streamingContext: StreamingContext = new StreamingContext(sc, Seconds(5))
    // Receive text lines from a TCP socket.
    val textStream: ReceiverInputDStream[String] = streamingContext.socketTextStream("node01", 9999)
    // Split each line into words and pair each word with an initial count of 1.
    val wordPairs: DStream[(String, Int)] = textStream.flatMap(_.split(" ")).map((_, 1))
    // reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration):
    //  - windowDuration: width of the window (how much history is aggregated);
    //  - slideDuration: how often the windowed result is produced.
    // Both must be multiples of the 5s batch interval. The window (10s) must
    // be >= the slide (5s); the original code had them swapped (window 5s,
    // slide 10s), which silently dropped every other batch from the counts.
    val windowedCounts: DStream[(String, Int)] =
      wordPairs.reduceByKeyAndWindow((x: Int, y: Int) => x + y, Seconds(10), Seconds(5))
    windowedCounts.print()
    // Uncomment to print the raw received lines instead:
    // textStream.print()
    // Start the streaming job...
    streamingContext.start()
    // ...and block until it is terminated manually.
    streamingContext.awaitTermination()
  }
}
