package org.niit.stream

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.niit.stream.SparkStreaming_03.MyReceiver

/**
 * Word-count over a socket text stream, implemented via `DStream.transform`.
 *
 * `transform` hands each micro-batch to a function as a plain RDD, letting us
 * use the full RDD API (flatMap / map / reduceByKey) where the DStream API
 * alone would be insufficient. Spark SQL and Spark Streaming both fall back
 * to Spark Core (RDDs) in this way: DataFrame/Dataset <-> RDD, DStream <-> RDD.
 *
 * Run `nc -lk 9999` first, then start this program; type words separated by
 * spaces and per-batch counts are printed every 3 seconds.
 */
object SparkStreaming_04 {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("streaming")
    //conf.set("spark.testing.memory","4442528585")

    // 3-second micro-batch interval.
    val streamingContext = new StreamingContext(conf, Seconds(3))
    streamingContext.sparkContext.setLogLevel("ERROR")

    // One line of text per record from the TCP source.
    val socketLines: ReceiverInputDStream[String] =
      streamingContext.socketTextStream("localhost", 9999)

    // The transform function runs on the Driver once per batch; the RDD
    // operations it builds (flatMap/map/reduceByKey) execute on Executors.
    val wordCounts: DStream[(String, Int)] = socketLines.transform { batchRdd =>
      batchRdd
        .flatMap(line => line.split(" "))   // Executor side: line -> words
        .map(word => (word, 1))             // Executor side: word -> (word, 1)
        .reduceByKey(_ + _)                 // per-batch count for each word
    }

    wordCounts.print()

    streamingContext.start()
    streamingContext.awaitTermination()
  }

}
