package com.atguigu.stream

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Demo of Spark Streaming real-time computation: a socket-based word count.
 */
object TestSparkStreaming_WordCount {

  /**
   * Entry point: reads comma-separated words from a socket and prints a
   * per-batch word count every 5 seconds.
   *
   * @param args optional overrides: args(0) = source host (default "linux1"),
   *             args(1) = source port (default 9999)
   */
  def main(args: Array[String]): Unit = {

    // Source host/port: defaults preserved, but overridable from the command line.
    val host = if (args.length > 0) args(0) else "linux1"
    val port = if (args.length > 1) args(1).toInt else 9999

    // Spark configuration: run locally using all available cores.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("TestSparkStreaming_WordCount")

    // Streaming context; the constructor takes the batch (collection) interval.
    val streamingContext = new StreamingContext(sparkConf, Seconds(5))

    // A class that creates a SparkContext acts as the Driver.

    // Receive lines of text from the socket.
    val socketDStream: ReceiverInputDStream[String] = streamingContext.socketTextStream(host, port)

    // line ==> words (comma-separated)
    val wordDStream: DStream[String] = socketDStream.flatMap(_.split(","))

    // word ==> (word, 1)
    val wordToCountDStream: DStream[(String, Int)] = wordDStream.map((_, 1))

    // Sum the counts per word within each batch.
    val wordToSumDStream: DStream[(String, Int)] = wordToCountDStream.reduceByKey(_ + _)

    // Print each batch's results to stdout.
    wordToSumDStream.print()

    // Start the receiver and begin processing batches.
    streamingContext.start()

    // The Driver must not exit; block until the streaming job is terminated.
    streamingContext.awaitTermination()
  }
}
