package com.study.bigdata.spark.core.rdd.persist

import org.apache.spark.{SparkConf, SparkContext}

object Scala03_RDD_Persist {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)
    // Checkpoint files are written under this directory; in production this
    // should be a fault-tolerant path (e.g. HDFS), "cp" is fine for a local demo.
    sc.setCheckpointDir("cp")

    val lineRdd = sc.makeRDD(List("hello spark hadoop hive","hbase flume"))
    val tokenRdd = lineRdd.flatMap(line => line.split(" "))

    // Side-effecting map: the banner prints once per element per computation,
    // which makes it visible how many times this stage actually runs.
    val pairRdd = tokenRdd.map { token =>
      println("******************")
      (token, 1)
    }

    // checkpoint() truncates the lineage. For safety Spark re-runs the job to
    // materialize the checkpoint, so the stage would otherwise execute twice;
    // calling cache() first lets the checkpoint job read the cached data
    // instead of recomputing it.
    pairRdd.cache()
    pairRdd.checkpoint()

    val countRdd = pairRdd.reduceByKey(_ + _)
    // Before any action runs, the debug string shows the full original lineage.
    println(countRdd.toDebugString)
    countRdd.collect().foreach(println)
    println("-------------------------")

    val groupedRdd = pairRdd.groupBy(_._2)
    groupedRdd.collect()
    // After the first action, the lineage is cut: it now starts from the
    // checkpoint (CheckpointRDD) rather than the original chain.
    println(countRdd.toDebugString)

    sc.stop()
  }

}
