package cn.itcast.spark.rdd

import org.apache.commons.lang3.StringUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

class CacheOp {

  /**
   * Baseline (no caching):
   * 1. Create the SparkContext
   * 2. Read the log file
   * 3. Extract the IP and assign an initial count of 1
   * 4. Clean the data (drop records with an empty IP)
   * 5. Sum the occurrences per IP
   * 6. Find the least frequent IP
   * 7. Find the most frequent IP
   *
   * Each action below re-runs the whole lineage from the text file,
   * so the aggregation is computed once per action.
   */
  @Test
  def prepare(): Unit = {
    // 1. Create the SparkContext
    val conf = new SparkConf().setMaster("local[6]").setAppName("cache_prepare")
    val sc = new SparkContext(conf)
    try {
      // 2. Read the log file
      val source = sc.textFile("dataset/access_log_sample.txt")

      // 3. IP is the first whitespace-separated token; pair it with an initial count of 1
      val countRDD = source.map(item => (item.split(" ")(0), 1))

      // 4. Clean: drop records whose IP field is empty
      val clearRDD = countRDD.filter(item => StringUtils.isNotEmpty(item._1))

      // 5. Sum the counts per IP
      val aggRDD = clearRDD.reduceByKey(_ + _)

      // 6. Least frequent IP — this action recomputes the full lineage
      val lessIp = aggRDD.sortBy(item => item._2, ascending = true).first()

      // 7. Most frequent IP — recomputes the full lineage again
      val moreIp = aggRDD.sortBy(item => item._2, ascending = false).first()

      println(lessIp, moreIp)
    } finally {
      // Release the context so other tests in this JVM can create their own
      sc.stop()
    }
  }

  /**
   * Same pipeline, but the aggregated RDD is cached (MEMORY_ONLY by default),
   * so the lineage up to the aggregation runs only once; both actions below
   * reuse the cached partitions.
   */
  @Test
  def cache(): Unit = {
    val conf = new SparkConf().setMaster("local[6]").setAppName("cache_prepare")
    val sc = new SparkContext(conf)
    try {
      // RDD transformation pipeline
      val source = sc.textFile("dataset/access_log_sample.txt")
      val countRDD = source.map(item => (item.split(" ")(0), 1))
      val clearRDD = countRDD.filter(item => StringUtils.isNotEmpty(item._1))

      // cache() returns the same RDD (this.type), so no var/reassignment is needed
      val aggRDD = clearRDD.reduceByKey(_ + _).cache()

      // Two actions on the same RDD; without the cache each would
      // re-execute the entire lineage
      val lessIp = aggRDD.sortBy(item => item._2, ascending = true).first()
      val moreIp = aggRDD.sortBy(item => item._2, ascending = false).first()

      println(lessIp, moreIp)
    } finally {
      sc.stop()
    }
  }

  /**
   * Like cache(), but uses persist() with an explicit storage level.
   * cache() is equivalent to persist(StorageLevel.MEMORY_ONLY).
   */
  @Test
  def persist(): Unit = {
    val conf = new SparkConf().setMaster("local[6]").setAppName("cache_prepare")
    val sc = new SparkContext(conf)
    try {
      // RDD transformation pipeline
      val source = sc.textFile("dataset/access_log_sample.txt")
      val countRDD = source.map(item => (item.split(" ")(0), 1))
      val clearRDD = countRDD.filter(item => StringUtils.isNotEmpty(item._1))

      // persist() also returns the same RDD, so a val suffices
      val aggRDD = clearRDD.reduceByKey(_ + _).persist(StorageLevel.MEMORY_ONLY)
      println(aggRDD.getStorageLevel)

      // Actions intentionally left commented out in this demo:
      // every action would otherwise run the whole lineage (once, then
      // reuse the persisted data).
      //    val lessIp = aggRDD.sortBy(item => item._2, ascending = true).first()
      //    val moreIp = aggRDD.sortBy(item => item._2, ascending = false).first()
      //    println(lessIp, moreIp)
    } finally {
      sc.stop()
    }
  }

  /**
   * Demonstrates checkpoint(): the RDD is materialized to the checkpoint
   * directory (local here; may also be an HDFS path) and its lineage is
   * truncated.  Checkpointing launches a separate job that recomputes the
   * RDD, so the RDD is cached first to avoid computing it twice.
   */
  @Test
  def checkpoint(): Unit = {
    val conf = new SparkConf().setMaster("local[6]").setAppName("cache_prepare")
    val sc = new SparkContext(conf)
    try {
      // Set the checkpoint location; this can also be an HDFS directory
      sc.setCheckpointDir("checkpoint")

      // RDD transformation pipeline
      val source = sc.textFile("dataset/access_log_sample.txt")
      val countRDD = source.map(item => (item.split(" ")(0), 1))
      val clearRDD = countRDD.filter(item => StringUtils.isNotEmpty(item._1))

      // Cache BEFORE checkpointing: the checkpoint job then reads the cached
      // partitions instead of recomputing the whole lineage a second time
      val aggRDD = clearRDD.reduceByKey(_ + _).cache()
      aggRDD.checkpoint()

      // Actions; the first one triggers both the computation and the
      // checkpoint job
      val lessIp = aggRDD.sortBy(item => item._2, ascending = true).first()
      val moreIp = aggRDD.sortBy(item => item._2, ascending = false).first()

      println(lessIp, moreIp)
    } finally {
      sc.stop()
    }
  }

}











































