package cn.itcast.dstream

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

/**
 * Spark Streaming word-count over a sliding window.
 *
 * Reads lines from a socket source (host 192.168.121.134, port 9999),
 * splits them into words, and every 1 second prints the word counts
 * accumulated over the last 3 seconds via `reduceByKeyAndWindow`.
 *
 * Note: both the window duration (3s) and the slide duration (1s) must be
 * multiples of the batch interval (1s), which they are here.
 */
object ReduceByKeyAndWindowTest {
    def main(args: Array[String]): Unit = {
        // 1. Build the SparkConf: app name + master. local[2] runs the job
        //    with 2 local threads — at least 2 are required so that one can
        //    run the socket receiver and the other can process batches.
        val sparkConf: SparkConf = new SparkConf().setAppName("WordCount").setMaster("local[2]")
        // 2. Create the SparkContext, the entry point for all computation
        //    (it creates the DAGScheduler and TaskScheduler internally).
        val sc: SparkContext = new SparkContext(sparkConf)
        // 3. Reduce log noise so the windowed counts are readable on stdout.
        sc.setLogLevel("WARN")
        // 4. Create the StreamingContext with a 1-second batch interval.
        val ssc: StreamingContext = new StreamingContext(sc, Seconds(1))
        // 5. Connect to the socket source (default storage level) to obtain
        //    a DStream of raw text lines.
        val dstream: ReceiverInputDStream[String] = ssc.socketTextStream("192.168.121.134", 9999)
        // 6. Split each line on spaces and pair every word with a count of 1.
        val wordAndOne: DStream[(String, Int)] = dstream.flatMap(_.split(" ")).map((_, 1))

        // 7. Aggregate counts per word with reduceByKeyAndWindow:
        //    window duration = 3s, slide duration = 1s, so each output batch
        //    covers the three most recent 1-second batches.
        val windowWords: DStream[(String, Int)] =
            wordAndOne.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(3), Seconds(1))
        // 8. Print each window's word counts to stdout.
        windowWords.print()
        // 9. Start the streaming computation.
        ssc.start()
        // 10. Block until the streaming job is terminated externally.
        ssc.awaitTermination()
    }
}
