package com.learn.lb.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming word count over a TCP socket text stream.
 *
 * For local testing, install netcat and run a server:
 *   yum install nmap-ncat.x86_64
 *   nc -lk 9999
 *
 * Usage: WordCountSocketStreaming [host] [port]
 * Defaults preserve the original hard-coded values: host "hadoop-1", port 9999.
 *
 * @author laibo
 * @since 2019/9/2 21:37
 */
object WordCountSocketStreaming {
  def main(args: Array[String]): Unit = {
    // Optional CLI overrides for the socket endpoint; `lift` yields None for
    // missing args so a zero-arg launch behaves exactly like the original.
    val host = args.lift(0).getOrElse("hadoop-1")
    val port = args.lift(1).map(_.toInt).getOrElse(9999)

    // Reading from a socket requires a master with more than one thread
    // (e.g. local[2]): the receiver permanently occupies one core, and at
    // least one more is needed to process batches. Here master/appName are
    // expected to be supplied externally (e.g. via spark-submit).
//    val conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    val conf = new SparkConf()
    // 5-second micro-batch interval.
    val ssc = new StreamingContext(conf, Seconds(5))
    // socketTextStream returns an input DStream of text lines from the socket.
    val lines = ssc.socketTextStream(host, port)
    // Classic word count: split on spaces, pair each word with 1, sum per key.
    val wordCount = lines.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    // Print the first elements of each batch's counts to stdout.
    wordCount.print()
    ssc.start()
    // Block the main thread until the streaming job is stopped or fails.
    ssc.awaitTermination()
  }
}
