package org.huangrui.spark.scala.core.rdd.dep

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 *
 * @Author hr
 * @Create 2024-10-19 13:40
 */
/**
 * Demonstrates how `cache()` affects an RDD's lineage.
 *
 * Two jobs are run over the same cached RDD; `toDebugString` is printed
 * before and after the second job so the cached dependency is visible in
 * the lineage output. The commented-out `checkpoint()` call shows the
 * alternative that would sever the lineage instead of extending it.
 */
object Spark06_Persist {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val context = new SparkContext(conf)
    // Checkpoint directory: a shared filesystem such as HDFS is recommended;
    // a local path also works for local experimentation.
    context.setCheckpointDir("cp")

    val lines: RDD[String] = context.makeRDD(Array("hello world", "hello atguigu", "atguigu", "hahah"))
    val words: RDD[String] = lines.flatMap(line => line.split(" "))
    // The println marks each time the map stage actually executes, so the
    // effect of caching (stage skipped on the second job) is observable.
    val pairs = words.map { w =>
      println("***********************")
      (w, 1)
    }

    pairs.cache()
//    pairs.checkpoint()

    val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)
    println(counts.toDebugString)
    val result: Array[(String, Int)] = counts.collect()

    println("计算1完毕")
    result.foreach(println)
    println("#########################")
    // Second action over the cached RDD: the map stage should not re-run.
    pairs.groupByKey().collect()
    println(counts.toDebugString)
    println("计算2完毕")
    // NOTE: cache() adds a dependency into the lineage (visible in toDebugString).
    // NOTE: checkpoint() severs (replaces) the lineage instead.

    context.stop()
  }
}
