package com.ada.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * The `transform` primitive lets you apply an arbitrary RDD-to-RDD function to every batch of a
  * DStream. This makes it easy to use RDD operations that are not exposed directly in the DStream
  * API, effectively extending the Spark API. The supplied function is invoked once per batch
  * interval, i.e. it transforms each RDD generated by the DStream.
  */
object SparkStreaming08_Transform {

    /**
      * Word-count over a socket text stream, using `DStream.transform` to apply an
      * RDD-level `map` to every batch.
      *
      * @param args optional: args(0) = source host (default "hadoop121"),
      *             args(1) = source port (default 9999). Defaults preserve the
      *             original hard-coded behavior, so existing invocations are unaffected.
      */
    def main(args: Array[String]): Unit = {

        // Socket source location; overridable from the command line for reuse on other hosts.
        val host: String = if (args.length > 0) args(0) else "hadoop121"
        val port: Int = if (args.length > 1) args(1).toInt else 9999

        // Create SparkConf and initialize the StreamingContext with a 3-second batch interval.
        val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming08_Transform")
        val streamingContext = new StreamingContext(sparkConf, Seconds(3))

        val lines = streamingContext.socketTextStream(host, port)

        val words = lines.flatMap(_.split(" "))

        // Code here (Driver scope): executed once, when the DStream graph is built.
        val pairs: DStream[(String, Int)] = words.transform((rdd, time) => {
            // Code here (Driver scope): executed once per batch interval (m = number of batches).
            rdd.map(
                // Code here (Executor scope): executed per record, distributed across executors.
                word => (word, 1)
            )
        })

        val wordCounts = pairs.reduceByKey(_ + _)

        wordCounts.print()

        // Alternative using foreachRDD (kept for reference):
        /*
        lines.foreachRDD {
            rdd => {
                val word: RDD[String] = rdd.flatMap(_.split(" "))
                val wordAndOne: RDD[(String, Int)] = word.map((_, 1))
                val wordAndCount: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
                wordAndCount.collect().foreach(println)
            }
        }*/

        // Start the receiver.
        streamingContext.start()
        // Block the driver until the streaming computation terminates.
        streamingContext.awaitTermination()
    }

}

