package com.atguigu.bigdata.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Socket text stream demo: prints every line received in each 3-second batch.
// NOTE(review): original header said "窗口操作" (window operations), but no
// window operation is used in this example.
object SparkStreaming07_transform {
  /**
   * Entry point: consumes a socket text stream and prints every received
   * line, batch by batch. No transformation/aggregation is performed.
   */
  def main(args: Array[String]): Unit = {

    // Spark configuration object.
    // Fixed: appName was "SparkStreaming01_WordCount", copy-pasted from an
    // earlier example; it now matches this object so the job is identifiable
    // in the Spark UI.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("SparkStreaming07_transform")

    // Streaming context: data is collected and processed once per 3-second
    // batch interval.
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))

    // Collect lines from a TCP socket. Start a text source on hadoop102
    // first, e.g.:  nc -lk 9998
    // NOTE(review): the original comment said port 9999 while the code
    // connects to 9998 — confirm which port is intended; the code's value
    // is kept unchanged here.
    val socketLineDStream = streamingContext.socketTextStream("hadoop102", 9998)

    // For each batch RDD, print every line (output happens on the executors;
    // in local[*] mode it appears in this process's stdout).
    socketLineDStream.foreachRDD { rdd =>
      rdd.foreach(println)
    }

    // Start the receiver.
    streamingContext.start()
    // Block the driver thread until the streaming job is terminated.
    streamingContext.awaitTermination()
  }
}

