package org.huangrui.spark.scala.core.rdd.dep

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates RDD persistence with checkpointing: caching an RDD before
 * calling checkpoint() so the checkpoint job reuses cached data instead of
 * recomputing the full lineage.
 *
 * @Author hr
 * @Create 2024-10-19 11:40
 */
object Spark05_Persist_CP {

  /** Entry point: runs two actions over the same cached/checkpointed RDD. */
  def main(args: Array[String]): Unit = {
    // Local Spark context using all available cores; demo-only configuration.
    val conf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val sc = new SparkContext(conf)

    // Checkpoint files are written under this directory. A shared filesystem
    // such as HDFS is recommended in production; a local path works for demos.
    // Calling checkpoint() without this set raises:
    //   SparkException: Checkpoint directory has not been set in the SparkContext
    sc.setCheckpointDir("cp")

    val lines: RDD[String] = sc.makeRDD(Array("hello world", "hello atguigu", "atguigu", "hahah"))
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // The println makes it observable how many times this map stage runs —
    // without persistence, each action would re-evaluate it.
    val wordToOne: RDD[(String, Int)] = words.map { word =>
      println("***********************")
      (word, 1)
    }

    // Persist BEFORE the RDD is reused by multiple actions.
    // checkpoint() on its own re-runs the lineage from the source to
    // materialize the checkpoint data (safe but slow); caching first lets the
    // checkpoint job read the already-computed partitions from memory.
    wordToOne.cache()
    wordToOne.checkpoint()

    val wordToSum: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)
    val counts: Array[(String, Int)] = wordToSum.collect()

    println("计算1完毕")
    counts.foreach(println)
    println("#########################")

    // Second action over the same RDD: served from the cache/checkpoint,
    // so the map-side println should not fire again.
    wordToOne.groupByKey().collect()
    println("计算2完毕")

    sc.stop()

  }
}
