package com.hliushi.spark.rdd

import org.apache.commons.lang3.StringUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

/**
 * descriptions:
 *
 * author: Hliushi
 * date: 2021/5/15 20:40
 */
class CacheOp {

  val conf: SparkConf = new SparkConf().setMaster("local[6]").setAppName("cache_op")

  // NOTE(review): JUnit creates a fresh instance of this class per test method,
  // so each test owns its own SparkContext — and therefore each test must stop it.
  val sc = new SparkContext(conf)

  /**
   * Baseline without any caching:
   * 1. create the SparkContext
   * 2. read the access-log file
   * 3. extract the IP and pair it with an initial count of 1
   * 4. filter out empty IPs
   * 5. aggregate the count per IP
   * 6. report the least frequent IP
   * 7. report the most frequent IP
   */
  @Test
  def prepare(): Unit = {
    // 2. read the file
    val source = sc.textFile("dataset/access_log_sample.txt")
    //val source = sc.textFile("D:\\Code\\SparkProjects\\spark\\dataset\\access_log_sample.txt")

    // 3. the IP is the first whitespace-separated field of each log line
    val countRdd = source.map(x => x.split(" ")(0) -> 1)

    // 4. data cleaning: drop records whose IP field is empty
    val cleanRdd = countRdd.filter(x => StringUtils.isNotEmpty(x._1))

    // 5. aggregate the occurrences per IP
    val totalRdd = cleanRdd.reduceByKey(_ + _)

    // 6./7. least and most frequent IP. Each first() is an action, so without
    // caching the whole lineage above is recomputed twice.
    val lessIp = totalRdd.sortBy(_._2, ascending = true).first()
    val moreIp = totalRdd.sortBy(_._2, ascending = false).first()

    println(s"lessIp = ${lessIp}") // lessIp = (190.53.171.171,1)
    println(s"moreIp = ${moreIp}") // moreIp = (91.145.130.78,17)

    sc.stop()
  }


  /**
   * Caching demo.
   * Caching trades space for time: it occupies extra storage, so release it
   * with unpersist() once it is no longer needed.
   *
   * Function prototype — cache is simply an alias for persist:
   *   def cache(): this.type = persist()
   */
  @Test
  def cache(): Unit = {

    // Build the lineage. cache() returns this.type, so it chains into a single
    // val — no reassigned var needed.
    val source = sc.textFile("dataset/access_log_sample.txt")
    val countRdd = source.map(x => x.split(" ")(0) -> 1)
    val cleanRdd = countRdd.filter(x => StringUtils.isNotEmpty(x._1))
    val totalRdd = cleanRdd.reduceByKey(_ + _).cache()

    // Two actions: only the first recomputes the lineage; the second reads
    // the cached partitions.
    val lessIp = totalRdd.sortBy(_._2, ascending = true).first()
    val moreIp = totalRdd.sortBy(_._2, ascending = false).first()

    println(s"lessIp = ${lessIp}") // lessIp = (190.53.171.171,1)
    println(s"moreIp = ${moreIp}") // moreIp = (91.145.130.78,17)

    // fix: the original leaked the SparkContext; stop it like prepare() does
    sc.stop()
  }

  /**
   * Explicit persist demo.
   * Function prototypes:
   *   def persist(): this.type = persist(StorageLevel.MEMORY_ONLY)
   *   def persist(newLevel: StorageLevel): this.type
   */
  @Test
  def persist(): Unit = {

    // Build the lineage, then persist the aggregated RDD.
    val source = sc.textFile("dataset/access_log_sample.txt")
    val countRdd = source.map(x => x.split(" ")(0) -> 1)
    val cleanRdd = countRdd.filter(x => StringUtils.isNotEmpty(x._1))

    // StorageLevel.MEMORY_ONLY is the default level (what cache() uses)
    val totalRdd = cleanRdd.reduceByKey(_ + _).persist(StorageLevel.MEMORY_ONLY)
    // totalRdd.getStorageLevel = StorageLevel(memory, deserialized, 1 replicas)
    println(s"totalRdd.getStorageLevel = ${totalRdd.getStorageLevel}")

    // Two actions: the second one reads the persisted partitions instead of
    // recomputing the lineage.
    val lessIp = totalRdd.sortBy(_._2, ascending = true).first()
    val moreIp = totalRdd.sortBy(_._2, ascending = false).first()

    println(s"lessIp = ${lessIp}") // lessIp = (190.53.171.171,1)
    println(s"moreIp = ${moreIp}") // moreIp = (91.145.130.78,17)

    // fix: the original leaked the SparkContext; stop it like prepare() does
    sc.stop()
  }


  /**
   * Checkpoint demo.
   * Function prototype:
   *   def checkpoint(): Unit
   * The checkpoint directory must be set before calling checkpoint(); when the
   * job runs on a cluster, the directory must be an HDFS path.
   */
  @Test
  def checkpoint(): Unit = {

    // Set the checkpoint directory (local here; use an HDFS path on a cluster)
    sc.setCheckpointDir("checkpoint")

    // Build the lineage.
    val source = sc.textFile("dataset/access_log_sample.txt")
    val countRdd = source.map(x => x.split(" ")(0) -> 1)
    val cleanRdd = countRdd.filter(x => StringUtils.isNotEmpty(x._1))

    // checkpoint() returns Unit and — loosely speaking — behaves like an
    // action: it triggers a recomputation of the RDD and writes the result to
    // the checkpoint directory. Best practice is therefore to cache() first,
    // so the checkpoint job reuses the cached data instead of recomputing.
    val totalRdd = cleanRdd.reduceByKey(_ + _).cache()
    totalRdd.checkpoint()

    // Two actions; both read from the cache/checkpoint rather than replaying
    // the full lineage.
    val lessIp = totalRdd.sortBy(_._2, ascending = true).first()
    val moreIp = totalRdd.sortBy(_._2, ascending = false).first()

    println(s"lessIp = ${lessIp}") // lessIp = (190.53.171.171,1)
    println(s"moreIp = ${moreIp}") // moreIp = (91.145.130.78,17)

    // fix: the original leaked the SparkContext; stop it like prepare() does
    sc.stop()
  }
}