package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

object Demo17Cache {

  /**
    * Demonstrates RDD persistence (cache/persist) and checkpointing.
    *
    * The same `mapRDD` feeds two independent actions; persisting it avoids
    * recomputing the lineage (and re-reading the input file) for the second
    * action, and caching before `checkpoint()` avoids the extra recomputation
    * that checkpointing would otherwise trigger.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("pi")
      .setMaster("local")

    val sc = new SparkContext(conf)

    // Set the checkpoint directory.
    // A local path is fine when running locally; cluster mode needs an HDFS path.
    sc.setCheckpointDir("spark/data/checkpoint")

    val studentsRDD: RDD[String] = sc.textFile("spark/data/students.txt")

    // The println makes recomputation observable: without persistence it would
    // print once per line for EACH action that consumes mapRDD.
    val mapRDD: RDD[String] = studentsRDD.map(line => {

      println("student=========")

      line
    })

    /**
      * Persist RDDs that are consumed by more than one action.
      */

    // cache() defaults to MEMORY_ONLY
    //mapRDD.cache()

    // Use an explicit persistence level (serialized, spills to disk on memory pressure)
    mapRDD.persist(StorageLevel.MEMORY_AND_DISK_SER)

    /**
      * checkpoint: a snapshot, used mainly for fault tolerance.
      *
      * checkpoint() writes the RDD's data to the checkpoint dir (HDFS on a
      * cluster) and truncates the RDD's lineage (dependency chain).
      *
      * Checkpointing recomputes the RDD, so persist/cache it first to avoid
      * paying for that recomputation.
      *
      * checkpoint is used mainly in Spark Streaming.
      */
    mapRDD.checkpoint()

    // Action 1: count students per class (field index 4 of the CSV line).
    val clazzKVRDD: RDD[(String, Int)] = mapRDD.map(line => {
      val clazz: String = line.split(",")(4)
      (clazz, 1)
    })

    val clazzNumRDD: RDD[(String, Int)] = clazzKVRDD.reduceByKey((x, y) => x + y)

    clazzNumRDD.foreach(println)

    // Action 2: count students per gender (field index 3).
    // Because mapRDD is persisted, it is served from cache here, not recomputed.
    val genderKVRDD: RDD[(String, Int)] = mapRDD.map(line => {
      val gender: String = line.split(",")(3)
      (gender, 1)
    })

    val genderNumRDD: RDD[(String, Int)] = genderKVRDD.reduceByKey((x, y) => x + y)
    genderNumRDD.foreach(println)

    // Keep the driver alive so the Spark web UI (localhost:4040) stays up for
    // inspection. Sleeping replaces the original `while (true) {}` busy-wait,
    // which pegged one CPU core at 100% doing nothing.
    Thread.sleep(Long.MaxValue)

  }

}
