package day03.acc

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates RDD caching behavior in Spark.
 *
 * Key points illustrated (original author: wsl, 2020-12-07):
 *  - cache() adds to the lineage without replacing it, and is lazy:
 *    nothing is stored until the next action runs.
 *  - cache() delegates to persist() with the MEMORY_ONLY storage level;
 *    persist() lets you pick a different level.
 *  - Cached RDDs stay fault tolerant: if cached partitions are lost,
 *    only the lost partitions are recomputed from the lineage.
 */
object Cache {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("cache").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)

    // Build a (word, 1) pair RDD. The println inside map makes it visible
    // when the map function is actually (re)executed.
    val pairs: RDD[(String, Int)] = sc
      .textFile("sparkcore/input/1.txt")
      .flatMap(_.split(" "))
      .map { word =>
        println("map---")
        (word, 1)
      }
    // Lineage before caching.
    println(pairs.toDebugString)

    // Without a cache, each of these two jobs recomputes the full chain.
    pairs.collect().foreach(println)
    pairs.collect().foreach(println)
    println("------------------------------------------------")


    pairs.cache()

    // The first action after cache() materializes the cached partitions.
    pairs.collect().foreach(println)
    println(pairs.toDebugString)
    // Subsequent actions read straight from the cache — no "map---" output.
    pairs.collect().foreach(println)
    println("------------------------------------------------")

    // reduceByKey involves a shuffle; its shuffle output is kept and can be
    // reused by later jobs, which acts like an implicit cache.
    val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)
    println(counts.toDebugString)

    counts.collect().foreach(println)


    // Keep the driver alive so the Spark Web UI remains inspectable.
    Thread.sleep(Long.MaxValue)

    sc.stop()

  }
}
