package com.yanggu.spark.core.rdd.lineage

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

//RDD的血缘和依赖关系

/**
 * 解释说明
 * 1、(8) 表示8个分区, 这个和电脑的cpu核数有关系。
 * 2、MapPartitionsRDD[2] 中的 [2] 是该RDD在SparkContext中的唯一id编号, 并不表示依赖的RDD个数。
 * 3、当遇到 +-(8) 表示有Shuffle过程。当存在shuffle时, 作业会在此处划分出新的stage(阶段)。
 */

/*

(8) ParallelCollectionRDD[0] at makeRDD at RDD_Lineage_01.scala:15 []
----------------------
(8) MapPartitionsRDD[1] at flatMap at RDD_Lineage_01.scala:19 []
 |  ParallelCollectionRDD[0] at makeRDD at RDD_Lineage_01.scala:15 []
----------------------
(8) MapPartitionsRDD[2] at map at RDD_Lineage_01.scala:23 []
 |  MapPartitionsRDD[1] at flatMap at RDD_Lineage_01.scala:19 []
 |  ParallelCollectionRDD[0] at makeRDD at RDD_Lineage_01.scala:15 []
----------------------
(8) ShuffledRDD[3] at reduceByKey at RDD_Lineage_01.scala:27 []
 +-(8) MapPartitionsRDD[2] at map at RDD_Lineage_01.scala:23 []
    |  MapPartitionsRDD[1] at flatMap at RDD_Lineage_01.scala:19 []
    |  ParallelCollectionRDD[0] at makeRDD at RDD_Lineage_01.scala:15 []


 */
/**
 * RDD只支持粗粒度转换，即在大量记录上执行的单个操作。
 * 将创建RDD的一系列Lineage（血统）记录下来，以便恢复丢失的分区。
 * RDD的Lineage会记录RDD的元数据信息和转换行为，当该RDD的部分分区数据丢失时，它可以根据这些信息来重新运算和恢复丢失的数据分区。
 */
object RDD_Lineage_01 {

  /**
   * Builds a classic word-count pipeline and prints each RDD's lineage
   * (via `toDebugString`) after every transformation, so the growth of the
   * dependency chain — and the shuffle introduced by `reduceByKey`, shown
   * as `+-` in the output — is visible step by step.
   */
  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("WordCount")

    val sc = new SparkContext(sparkConf)

    // Ensure the SparkContext is released even if a transformation/action fails.
    try {
      // Parallelize an in-memory list: a single ParallelCollectionRDD, no dependencies yet.
      val fileRDD: RDD[String] = sc.makeRDD(List[String]("hello word", "java scala", "spark flink", "mysql hive"))
      println(fileRDD.toDebugString)
      println("----------------------")

      // flatMap adds a MapPartitionsRDD on top of the parallel collection (narrow dependency).
      val wordRDD: RDD[String] = fileRDD.flatMap(_.split(" "))
      println(wordRDD.toDebugString)
      println("----------------------")

      // map adds another narrow-dependency MapPartitionsRDD.
      val mapRDD: RDD[(String, Int)] = wordRDD.map((_, 1))
      println(mapRDD.toDebugString)
      println("----------------------")

      // reduceByKey introduces a ShuffledRDD; the lineage shows the shuffle as "+-".
      val resultRDD: RDD[(String, Int)] = mapRDD.reduceByKey(_ + _)
      println(resultRDD.toDebugString)

      // Collect to the driver and print word counts in descending order.
      resultRDD.collect().sortBy(-_._2).foreach { case (word, count) =>
        println(s"$word: $count")
      }
    } finally {
      // Side-effecting 0-arity method: call with parentheses.
      sc.stop()
    }
  }

}