package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

object Code30CheckPoint {

  /**
   * Demonstrates RDD checkpointing and its interaction with caching.
   *
   * Key observations this demo illustrates:
   *  - `checkpoint()` schedules a separate, additional job: after the first
   *    action completes, Spark walks back to the checkpointed RDD and
   *    recomputes its lineage from scratch to materialize the data into the
   *    checkpoint directory. Without a cache, the `flatMap` below therefore
   *    executes twice.
   *  - Calling `cache()` before `checkpoint()` lets the checkpoint job read
   *    the cached partitions instead of recomputing the lineage.
   *  - The checkpoint directory MUST be set first via
   *    `sc.setCheckpointDir(...)`, otherwise Spark fails with:
   *    "Checkpoint directory has not been set in the SparkContext".
   */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("Cache")
    val sc = new SparkContext(conf)
    // Must be configured before checkpoint() is called on any RDD.
    sc.setCheckpointDir("spark_code/output/checkpoint")

    val wordsRDD: RDD[String] = sc.parallelize(
      List("hello spark", "hello scala", "hello world"), 2
    )

    val wordSplitRDD: RDD[String] = wordsRDD.flatMap(
      line => {
        // Printed once per input line for every (re)computation of this RDD,
        // which makes the extra checkpoint job visible in the console.
        println("flatMap")
        line.split(" ")
      }
    )
    // Cache first so the follow-up checkpoint job reads cached partitions
    // instead of re-running the whole lineage (i.e. flatMap again).
    wordSplitRDD.cache()
    wordSplitRDD.checkpoint() // without cache() the flatMap side effect fires 6 times

    val groupByRDD: RDD[(String, Iterable[String])] = wordSplitRDD.groupBy(x => x)

    val countResRDD: RDD[(String, Int)] = groupByRDD
      .mapValues(
        words => {
          println("mapValues执行了...")
          words.size
        }
      )

//    countResRDD.checkpoint()

    countResRDD
      .saveAsTextFile("spark_code/output/wordCount")

    countResRDD
      .foreach(println)

    // Release the cached partitions once they are no longer needed.
    // Note: the RDD that was cached above is wordSplitRDD, not countResRDD.
//    wordSplitRDD.unpersist()

    // Keep the driver JVM alive so the Spark web UI remains reachable.
    // Sleep instead of busy-spinning so we do not burn a CPU core.
    while (true) { Thread.sleep(10000) }
  }
}
