package RDD

import org.apache.commons.lang3.StringUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

class RDD的缓存 {
  val conf = new SparkConf().setMaster("local[6]").setAppName("Test")
  val sc = new SparkContext(conf)

  @Test
  def prepare(): Unit = {
    //读取文件
    val sourceRDD = sc.textFile("C:\\Users\\HR\\Desktop\\Spark_data\\Flowbean.txt")
    //取出IP，并且赋值
    val result = sourceRDD.map(item => (item.split(" ")(0), 1))
      //清洗
      .filter(item => StringUtils.isNotEmpty(item._1))
      //统计次数
      .reduceByKey((curr, agg) => curr + agg)
      //缓存操作
      .cache()
    println("当前缓存级别:" + result.getStorageLevel)

    /** cache的底层就是persist
      * persist可以在使用时传入存储级别
      * .persist(StorageLevel.DISK_ONLY)
      */
    /**
      * 或者这样写
      * var rdd1 = result.reduceByKey((curr,agg)=>curr+agg)
      * rdd1 = rdd1.cache()
      */
    val sort1 = result.sortBy(item => item._2, ascending = false).first()
    val sort2 = result.sortBy(item => item._2).first()
    println(sort1, sort2)
  }

  @Test
  def CheckPoint(): Unit = {
    sc.setCheckpointDir("Checkpoint")
    val sourceRDD = sc.textFile("C:\\Users\\HR\\Desktop\\Spark_data\\Flowbean.txt")

    val result = sourceRDD.map(item => (item.split(" ")(0), 1))
      .filter(item => StringUtils.isNotEmpty(item._1))
      .reduceByKey((curr, agg) => curr + agg)
      .cache()
    //不准确的说，CheckPont是一个Action操作，也就是说，调用Checkpoint会重新调用RDD
    //所以在Checkpoint之前，进行一次Cache
    result.checkpoint()


    val sort1 = result.sortBy(item => item._2, ascending = false).first()
    val sort2 = result.sortBy(item => item._2).first()
    println(sort1, sort2)
  }
}
