package com.shujia.spark.streaming

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo1WordCount {

  /**
   * Spark Streaming word-count demo.
   *
   * Reads lines from a socket text stream, splits each line on commas,
   * and prints the per-batch word counts every 5 seconds.
   *
   * Usage: Demo1WordCount [host] [port]
   *   host — socket server host (default: "master")
   *   port — socket server port (default: 8888)
   *
   * To provide a test source, install netcat and start a socket server:
   *   yum install nc
   *   nc -lk 8888
   */
  def main(args: Array[String]): Unit = {

    // Allow host/port to be overridden on the command line; the defaults
    // preserve the original hard-coded values, so zero-arg runs behave
    // exactly as before.
    val host = if (args.length >= 1) args(0) else "master"
    val port = if (args.length >= 2) args(1).toInt else 8888

    /**
     * Create the Spark Streaming environment; it depends on a Spark Core
     * context underneath.
     */
    val conf = new SparkConf()
      // Use at least 2 local cores: one is consumed by the socket
      // receiver, the other is needed to process the batches. With only
      // 1 core the job would receive data but never process it.
      .setMaster("local[2]")
      .setAppName("wc")

    val sc = new SparkContext(conf)

    // Streaming context with a 5-second micro-batch interval: every 5
    // seconds the data received so far is turned into a batch and processed.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // 1. Read the real-time source: a socket text stream, one line per record.
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream(host, port)

    // 2. Count words within each batch: split lines on ",", pair each word
    //    with 1, then sum per key.
    val countDS: DStream[(String, Int)] =
      linesDS
        .flatMap(_.split(","))
        .map((_, 1))
        .reduceByKey(_ + _)

    // Print each batch's word counts to stdout.
    countDS.print()

    // Start the streaming job and block the main thread until termination.
    ssc.start()
    ssc.awaitTermination()
    // NOTE(review): awaitTermination() blocks until stop() is invoked from
    // another thread, or rethrows if the job fails — so this stop() is
    // effectively unreachable in normal operation. Kept as a defensive
    // cleanup in case awaitTermination() ever returns normally.
    ssc.stop()
  }

}
