package com.shujia.stream

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Duration, Durations, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Spark Streaming word-count demo.
  *
  * Reads comma-separated lines from a TCP socket, counts words per batch,
  * and prints the counts of each 5-second micro-batch to stdout.
  */
object Demo1WordCount {
  def main(args: Array[String]): Unit = {

    // Socket source location. Defaults preserve the original hard-coded
    // values; optionally overridable from the command line: <host> <port>.
    val host: String = if (args.length >= 1) args(0) else "master"
    val port: Int = if (args.length >= 2) args(1).toInt else 8888

    /**
      * 1. Create the SparkContext.
      *
      * "local[2]" — at least 2 threads are required locally: one for the
      * socket receiver and one to process the batches.
      */
    val conf: SparkConf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("stream")

    val sc = new SparkContext(conf)

    /**
      * 2. Create the Spark Streaming context.
      *
      * The second argument is the batch interval: the stream is cut into
      * one micro-batch (one RDD) every 5 seconds.
      */
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    /**
      * Read data from the socket. To feed it on the source host:
      *   yum install nc
      *   nc -lk 8888
      *
      * A DStream is backed by RDDs under the hood — one RDD per 5-second batch.
      */
    val lineDS: DStream[String] = ssc.socketTextStream(host, port)

    /**
      * Count the words within each batch:
      * split comma-separated lines into words, pair each word with 1,
      * then sum the counts per word.
      */
    val wordsDS: DStream[String] = lineDS.flatMap(line => line.split(","))

    val kvDS: DStream[(String, Int)] = wordsDS.map(word => (word, 1))

    val countDS: DStream[(String, Int)] = kvDS.reduceByKey(_ + _)

    // Output action: print each batch's counts to stdout.
    countDS.print()

    /**
      * Start the streaming job. awaitTermination() blocks the driver until
      * the job is stopped or fails; stop() afterwards is a final cleanup.
      */
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }

}
