package scalaKafka

import org.apache.spark.streaming.{Milliseconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.log4j.{Level, Logger}

/**
 * Minimal Spark Streaming word count over a TCP text socket.
 *
 * Reads newline-delimited text from a socket, splits each line on spaces,
 * and prints per-batch word counts every 5 seconds.
 *
 * Usage: StreamingWordCountTest [host] [port]
 *   - host: socket source host (default "node2")
 *   - port: socket source port (default 8888)
 *
 * To feed data, start a listener on the source host first: `nc -lk 8888`.
 */
object StreamingWordCountTest {
    // Runs in the object initializer, i.e. on first access to this object
    // (before main). Silences Spark's verbose INFO logging.
    Logger.getLogger("org.apache").setLevel(Level.WARN)

    def main(args: Array[String]): Unit = {
        // Allow host/port to be overridden from the command line;
        // defaults keep the original hard-coded behavior.
        val host = if (args.length > 0) args(0) else "node2"
        val port = if (args.length > 1) args(1).toInt else 8888

        // Streaming jobs use a StreamingContext layered on a SparkContext.
        val conf = new SparkConf().setAppName("TestStreaming").setMaster("local[*]")
        val context = new SparkContext(conf)
        // Second argument is the micro-batch interval (5000 ms).
        val ssc = new StreamingContext(context, Milliseconds(5000))

        // Create an abstract DStream from the socket source.
        val lines = ssc.socketTextStream(host, port)
        // Tokenize: split each line on spaces and flatten into words.
        val words = lines.flatMap(_.split(" "))
        // Pair each word with a count of 1.
        val wordAndOne = words.map((_, 1))
        // Sum the counts per word within each batch.
        val reduced = wordAndOne.reduceByKey(_ + _)
        // Print the first elements of each batch's result to stdout.
        reduced.print()

        // Start receiving/processing and block until the job is stopped.
        ssc.start()
        ssc.awaitTermination()
    }
}
