package study.core.rdd.persist

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Persistence demo: marks an RDD with both cache() and checkpoint() so the
 * separate job that materializes the checkpoint reads the cached partitions
 * instead of recomputing the whole lineage.
 *
 * @author zh
 * @date 2021/5/12 11:06
 */
object TestPersist {
  def main(args: Array[String]): Unit = {
    // Spark configuration; "local" runs driver and executor in-process.
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    // Create the Spark context (the connection to the cluster).
    val sc = new SparkContext(sparkConf)
    try {
      // Directory where checkpoint data is written.
      sc.setCheckpointDir("./checkpoint")

      // Input path may be overridden from the command line; defaults to the
      // original demo file, so existing invocations behave the same.
      val inputPath = args.headOption.getOrElse("data/test.txt")

      // Read the file as an RDD of lines.
      val lines: RDD[String] = sc.textFile(inputPath)

      // Split each line into individual words.
      val words: RDD[String] = lines.flatMap(_.split(" "))

      // Pair every word with the count 1. The println makes recomputation
      // visible: without cache(), this map would run again for the extra
      // job Spark launches to write the checkpoint.
      val wordToOne: RDD[(String, Int)] = words.map { word =>
        println("############")
        (word, 1)
      }
      println(wordToOne.toDebugString)

      // Cache BEFORE checkpointing so the checkpoint job reads from the
      // cache rather than re-running the lineage from the text file.
      wordToOne.cache()
      // Mark the RDD for checkpointing; the data is actually written when
      // the first action on this RDD runs.
      wordToOne.checkpoint()

      // Aggregate the 1s per key to get word counts.
      val wordCount: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)
      println("======wordCount=======")
      wordCount.collect().foreach(println)
      println(wordCount.toDebugString)

      println("---------------")

      // Second job over the same RDD: it is served from the cache/checkpoint,
      // so the "############" marker lines are not printed again.
      val wordCount1: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)
      println("======wordCount1=======")
      wordCount1.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if a job above fails;
      // previously an exception would leak the context.
      sc.stop()
    }
  }
}
