package com.atguigu.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Duration, StreamingContext, StreamingContextState}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

object SprakStreaming12_Out {
  /**
   * Demonstrates DStream output operations in Spark Streaming.
   *
   * Reads lines from a local socket (localhost:9999), performs a word count
   * per 3-second batch, and emits the results via `foreachRDD`.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("StreamWordCount")

    // A Spark Streaming job may need to be shut down for a business or
    // technology upgrade. Shutting down requires calling stop(), and calling
    // it inline here is not appropriate — a separate monitoring thread should
    // perform the graceful stop.
    val ssc = new StreamingContext(conf, Duration(3000)) // 3000 ms batch interval

    val socketData: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)

    val words: DStream[String] = socketData.flatMap(_.split(" "))
    val wordToOne: DStream[(String, Int)] = words.map((_, 1))
    val wordToCount: DStream[(String, Int)] = wordToOne.reduceByKey(_ + _)

    /* Every DStream pipeline must end in an output operation (print(),
     * foreachRDD, saveAs...Files, ...). Without one, Spark fails with
     * "No output operations registered" and no data is processed.
     *
     * wordToCount.print()  // simplest output operation: prints first 10 elements
     */

    // NOTE(fix): the original code called `wordToCount.transform(rdd => rdd)`
    // and discarded the result. transform() is a lazy *transformation* that
    // returns a new DStream; a DStream with no downstream output operation is
    // never scheduled, so that call was dead code and has been removed.

    // foreachRDD is the general-purpose output operation: the function runs on
    // the driver once per batch, and RDD actions inside it trigger the job.
    // (Contrast with transform(), which must return an RDD for further use.)
    wordToCount.foreachRDD(
      rdd => {
        rdd.foreach(println) // RDD action: prints each (word, count) pair on the executors
      }
    )

    ssc.start()            // start the receivers and the batch scheduler
    ssc.awaitTermination() // block the main thread until the context is stopped
  }
}
