package SparkRDD.RDD的缓存

import org.apache.commons.lang.StringUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test


/**
 * `cache` and `persist` both cache an RDD; `cache()` is simply the
 * no-argument `persist()`, which uses the default storage level
 * `MEMORY_ONLY`.
 *
 * Underlying API (from `org.apache.spark.rdd.RDD`):
 * {{{
 *   // Persist this RDD with the default storage level (MEMORY_ONLY).
 *   def persist(): this.type = persist(StorageLevel.MEMORY_ONLY)
 *
 *   // Persist this RDD with the default storage level (MEMORY_ONLY).
 *   def cache(): this.type = persist()
 * }}}
 *
 * The overload `persist(StorageLevel.MEMORY_ONLY)` takes an explicit
 * storage-level argument, so the caller can choose the cache level.
 */
class persistTest {

  /**
   * Builds a small word-count-style pipeline over an IP log, caches the
   * aggregated RDD with an explicit storage level, and prints that level.
   *
   * Fixes over the original: the `SparkContext` is now stopped in a
   * `finally` block (it previously leaked across test runs), and the
   * needless `var` reassignment of the persisted RDD is gone —
   * `persist` returns `this.type`, so the same RDD reference is cached
   * in place.
   */
  @Test
  def test(): Unit = {

    val conf = new SparkConf().setAppName("ip统计").setMaster("local[6]")
    val sc   = new SparkContext(conf)

    try {
      val resource = sc.textFile("src/main/scala/RDD的缓存/ip.txt")
      // Key each record by its first comma-separated field (the IP) with a count of 1.
      val ipRDD = resource.map( item => ( item.split(",")(0) , 1) )
      // Drop records whose IP field is empty.
      val cleanRDD = ipRDD.filter( item => StringUtils.isNotEmpty(item._1) )
      // Sum the counts per IP.
      val aggRDD = cleanRDD.reduceByKey( (curr,agg) => curr + agg )

      // Cache the transformation result; persist returns this RDD, so no
      // reassignment is required.
      aggRDD.persist(StorageLevel.MEMORY_ONLY)
      println(aggRDD.getStorageLevel) // print the current storage level

      /**
       * Expected output:
       * Total input paths to process : 1
       * StorageLevel(memory, deserialized, 1 replicas)
       */

      //    val maxRDD = aggRDD.sortBy( item => item._2,ascending = true ).first()   // Action 1
      //    val minRDD = aggRDD.sortBy( item => item._2,ascending = false ).first()  // Action 2
      //    println("max:"+maxRDD,"min:"+minRDD)
    } finally {
      // Release the SparkContext so repeated runs in the same JVM don't leak it.
      sc.stop()
    }
  }

}
