package SparkStreaming

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object StreamingTest {

  /**
   * Spark Streaming word-count demo: reads lines from a socket every 5 seconds,
   * splits them into words, counts occurrences per batch, and demonstrates that
   * code inside `foreachRDD` transformations runs lazily until an action fires.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[2]")
    conf.setAppName("StreamingTest")

    // There are two ways to build a StreamingContext:
    //   1) from a SparkConf  — the StreamingContext creates the SparkContext itself;
    //   2) from an existing SparkContext — e.g. new StreamingContext(sc, Durations.seconds(5)).
    // Only one SparkContext may exist per JVM, so never construct a separate
    // `new SparkContext(...)` alongside option (1) — that fails at startup.
    val ssc = new StreamingContext(conf, Durations.seconds(5))
    // The underlying SparkContext is reachable via ssc.sparkContext.
    ssc.sparkContext.setLogLevel("Error")

    // Receiver-based socket source; replace "hostname" with the actual host
    // running e.g. `nc -lk 9000`.
    val lines: ReceiverInputDStream[String] = ssc.socketTextStream("hostname", 9000)
    val words: DStream[String] = lines.flatMap(one => one.split(" "))
    val pairsWords: DStream[(String, Int)] = words.map(one => (one, 1))
    val result: DStream[(String, Int)] = pairsWords.reduceByKey(_ + _)

    //result.print()

    result.foreachRDD(pairRDD => {
      // filter/map below are lazy transformations; the println side effects
      // only execute because count() is an action that triggers the job.
      val newRDD: RDD[(String, Int)] = pairRDD.filter(one => {
        println("filter===============")
        true
      })
      val resultRDD: RDD[(String, Int)] = newRDD.map(one => {
        println("map**************" + one)
        one
      })
      resultRDD.count()
    })

    /*result.foreachRDD(wordCount => {
      // Code here runs on the Driver once per batch...
      println("******producer in Driver********")
      val sortRDD: RDD[(String, Int)] = wordCount.sortByKey(false)
      val result: RDD[(String, Int)] = sortRDD.filter(tp => {
        // ...while code inside RDD transformations runs on Executors.
        println("***********producer in Executor**********")
        true
      })
      result.foreach(println)
    })*/

    ssc.start()
    // Blocks until the streaming computation is stopped or fails.
    ssc.awaitTermination()
    // ssc.stop(true) also stops the underlying SparkContext;
    // ssc.stop(false) leaves the SparkContext alive for reuse.
    ssc.stop()
  }
}
