package com.shujia.streaming

import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo01WordCountOnStreaming {

  /**
   * Streaming word count: reads comma-separated words from a socket
   * and prints per-batch word counts every 5 seconds.
   *
   * To feed data, start a server first:
   * {{{
   *   yum install nc
   *   nc -lk 8888
   * }}}
   *
   * @param args optional overrides: args(0) = host (default "master"),
   *             args(1) = port (default 8888)
   */
  def main(args: Array[String]): Unit = {

    // Allow host/port override from the command line; the defaults
    // preserve the original hard-coded behavior.
    val host: String = if (args.length > 0) args(0) else "master"
    val port: Int = if (args.length > 1) args(1).toInt else 8888

    // Build the SparkSession (and with it the underlying SparkContext).
    // local[2]: at least 2 threads are required — one is permanently
    // occupied by the socket receiver, the other processes batches.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo01WordCountOnStreaming")
      .master("local[2]")
      .getOrCreate()

    /**
     * Entry point of Spark Streaming.
     * Needs a SparkContext plus a batch interval: every 5s the data
     * received so far is packaged into one RDD and processed.
     */
    val ssc: StreamingContext = new StreamingContext(spark.sparkContext, Durations.seconds(5))

    // Connect to the nc server over a socket to simulate a message queue.
    val lineDS: DStream[String] = ssc.socketTextStream(host, port)

    // Per-batch word count: split lines on commas, pair each word with 1,
    // then sum the counts by key.
    val wordCntDS: DStream[(String, Int)] = lineDS
      .flatMap(_.split(","))
      .map((_, 1))
      .reduceByKey(_ + _)

    // Print the first elements of each batch's result to stdout.
    wordCntDS.print()

    // Start the job and block until the context is stopped externally.
    ssc.start()
    ssc.awaitTermination()
    // NOTE: a trailing ssc.stop() was removed — awaitTermination() only
    // returns after the context has already been stopped, so that call
    // was unreachable dead code.
  }

}
