package com.study.bigdata.spark.core.rdd.persist

import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo: RDD persistence (cache / persist) in Spark Core.
 *
 * Two actions (`collect` on `reduceByKey`, then `collect` on `groupBy`) share
 * the same upstream RDD. Without persistence the side-effecting `map` below
 * would re-execute for every action; with persistence the second action reads
 * the stored data and the `println` markers appear only once.
 */
object Scala01_RDD_Persist {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val sc = new SparkContext(conf)

    val lines = sc.makeRDD(List("hello spark hadoop hive", "hbase flume"))
    val words = lines.flatMap(_.split(" "))
    // The println makes re-computation visible: it prints once per element
    // each time this map stage actually runs.
    val wordToOne = words.map { word =>
      println("******************")
      (word, 1)
    }

    // Persistence adds a cache-related dependency into the lineage, so
    // downstream actions can reuse the stored data instead of recomputing.
    //
    // NOTE: cache() is shorthand for persist(StorageLevel.MEMORY_ONLY).
    // Calling persist() again with a DIFFERENT storage level on the same RDD
    // throws UnsupportedOperationException ("Cannot change storage level of
    // an RDD after it was already assigned a level"), so cache() must NOT be
    // combined with the persist() call below — it is left here commented out
    // for reference only.
    // wordToOne.cache()

    // persist() lets us choose a storage level. DISK_ONLY_2 stores the data
    // on disk, replicated on two nodes. The persisted files belong to this
    // application only and are deleted when the application finishes.
    wordToOne.persist(StorageLevel.DISK_ONLY_2)

    // First action: triggers the full lineage, so the map's println runs
    // once per word while the data is being persisted.
    val wordCount = wordToOne.reduceByKey(_ + _)
    wordCount.collect().foreach(println)
    println("-------------------------")
    // Second action: reads the persisted data — no further println output.
    val rdd1 = wordToOne.groupBy(_._2)
    rdd1.collect()
    /*
    Expected output (pair order may vary):
    ******************    <- one marker per word, first action only
    ******************
    ******************
    ******************
    ******************
    ******************
    (hive,1)
    (hbase,1)
    (hello,1)
    (spark,1)
    (hadoop,1)
    (flume,1)
    -------------------------
     */
    sc.stop()

  }

}
