package com.shujia.core.transformations

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

object CheckPointOpt {
  /**
   * Demonstrates RDD checkpointing: parses a student file once, then runs two
   * independent jobs (count per class, count per age) that share the parsed
   * RDD through a checkpoint instead of re-reading the source file.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("cache合并")

    val sc = new SparkContext(conf)
    // Directory where checkpoint data is written; later jobs with the same
    // lineage resume from here instead of recomputing the upstream workflow.
    sc.setCheckpointDir("spark/data/checkpoint")
    //================================================================
    val studentRDD: RDD[Student] = sc.textFile("spark/data/students.txt")
      .map(_.split(","))
      // collect (partial function) instead of map: malformed lines that do
      // not split into exactly 5 fields are skipped rather than crashing the
      // whole job with a MatchError.
      .collect {
        case Array(id, name, age, gender, clazz) =>
          Student(id.toLong, name, age.toInt, gender, clazz)
      }

    /**
     * Checkpoint the parsed RDD so different jobs sharing this lineage can
     * start from the saved data rather than repeating the parse.
     *
     * NOTE: checkpoint() itself is lazy — nothing is written until the first
     * action runs, and that write RECOMPUTES the RDD from its lineage. We
     * persist first so the data is computed only once: the first action
     * caches it, and the checkpoint write reads from the cache.
     */
    studentRDD.persist(StorageLevel.MEMORY_ONLY)
    studentRDD.checkpoint()

    // Job 1: number of students per class.
    val resRDD1: RDD[(String, Int)] = studentRDD
      .map((stu: Student) => (stu.clazz, 1))
      .reduceByKey(_ + _)

    // Job 2: number of students per age.
    val resRDD2: RDD[(Int, Int)] = studentRDD
      .map((stu: Student) => (stu.age, 1))
      .reduceByKey(_ + _)

    resRDD1.foreach(println)
    resRDD2.foreach(println)

    // Keep the driver alive so the Spark web UI stays reachable.
    // Sleep instead of busy-spinning so we don't burn a CPU core.
    while (true) {
      Thread.sleep(1000)
    }
  }
}
