package com.yanggu.spark.core.rdd.lineage

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

//RDD lineage and dependency relationships
/**
 * Demonstrates RDD lineage: builds a word-count transformation chain and prints
 * the `dependencies` of each RDD along the way. Narrow transformations
 * (flatMap, map) produce a OneToOneDependency; the shuffle transformation
 * (reduceByKey) produces a ShuffleDependency.
 */
object RDD_Dependenics_02 {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)

    // Ensure the SparkContext is released even if a job fails.
    try {
      // Source RDD has no parent, so its dependency list is empty: List()
      val fileRDD: RDD[String] = sc.makeRDD(List[String]("hello word", "java scala", "spark flink", "mysql hive"))
      println(fileRDD.dependencies)
      println("----------------------")

      // Narrow dependency: List(org.apache.spark.OneToOneDependency@...)
      val wordRDD: RDD[String] = fileRDD.flatMap(_.split(" "))
      println(wordRDD.dependencies)
      println("----------------------")

      // Narrow dependency again: List(org.apache.spark.OneToOneDependency@...)
      val mapRDD: RDD[(String, Int)] = wordRDD.map((_, 1))
      println(mapRDD.dependencies)
      println("----------------------")

      // Shuffle boundary: List(org.apache.spark.ShuffleDependency@...)
      val resultRDD: RDD[(String, Int)] = mapRDD.reduceByKey(_ + _)
      println(resultRDD.dependencies)

      // Collect to the driver, then sort locally by count descending and print.
      resultRDD.collect().sortBy(-_._2).foreach(t => println(s"${t._1}: ${t._2}"))
    } finally {
      // Side-effecting 0-arity call keeps its parentheses by convention.
      sc.stop()
    }
  }

}