package com.bigdata.core.example

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates RDD caching: the same RDD lineage is executed twice,
 * once reading from the source file and once from the in-memory cache,
 * with wall-clock timings printed for comparison.
 */
object CacheAndPersist {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("cacheAndPersist")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    // Valid log levels are uppercase (ALL, DEBUG, ERROR, ...); older Spark
    // versions reject lowercase strings outright.
    sc.setLogLevel("ERROR")

    val lines = sc.textFile("data/words")

    // persist() is lazy and returns this same RDD (this.type), so a val is
    // enough — no reassignment needed. The cache is only populated when the
    // first action runs.
    // Equivalent alternatives:
    //   wordsRDD.cache()                                  // MEMORY_ONLY shorthand
    //   wordsRDD.persist(StorageLevel.MEMORY_AND_DISK)    // spill to disk if needed
    val wordsRDD: RDD[String] = lines.flatMap(_.split(",")).persist(StorageLevel.MEMORY_ONLY)

    // First job: triggers the full lineage, so data is read from the file
    // (disk/HDFS) and the cache gets filled as a side effect.
    var startTime: Long = System.currentTimeMillis()
    val result: RDD[(String, Int)] = wordsRDD.map((_, 1)).reduceByKey(_ + _)
    result.foreach(println) // action — sourced from disk
    println(System.currentTimeMillis() - startTime)

    // Second job over the same persisted RDD: lineage is short-circuited and
    // data comes straight from the in-memory cache, so this run is faster.
    startTime = System.currentTimeMillis()
    val result1: RDD[(String, Int)] = wordsRDD.map((_, 1)).reduceByKey(_ + _)
    result1.foreach(println) // action — sourced from memory
    println(System.currentTimeMillis() - startTime)

    // Keep the JVM (and thus the Spark UI at localhost:4040) alive so the
    // cached-RDD storage tab can be inspected. Demo-only; a real app would
    // call sc.stop() instead.
    Thread.sleep(Integer.MAX_VALUE)
  }

}
