package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo16Checkpoint {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("cache")

    val sc = new SparkContext(conf)

    // specify the directory where checkpoint data is saved
    // (on a cluster this should be a fault-tolerant path such as HDFS)
    sc.setCheckpointDir("data/checkpoint")

    val linesRDD: RDD[String] = sc.textFile("data/students.txt")


    val studentsRDD: RDD[(String, String, Int, String, String)] = linesRDD.map(student => {

      println("studentsRDD处理")

      val split: Array[String] = student.split(",")
      val id: String = split(0)
      val name: String = split(1)
      val age: Int = split(2).toInt
      val gender: String = split(3)
      val clazz: String = split(4)

      (id, name, age, gender, clazz)
    })

    /**
      * How checkpoint works:
      * 1. After the first job finishes, Spark traces the lineage back from the final RDD;
      *    any RDD on which checkpoint() was called is marked for checkpointing.
      * 2. A separate job is then launched to recompute that RDD and write its data to the
      *    checkpoint directory (typically HDFS).
      * 3. Subsequent jobs read the data directly from the checkpoint instead of recomputing it.
      *
      * Optimization: call cache() before checkpoint() so the extra checkpoint job reads the
      * cached data instead of recomputing the whole lineage.
      *
      * checkpoint is mainly used in Spark Streaming.
      */

    // cache first so the checkpoint job reads from memory instead of recomputing the lineage
    studentsRDD.cache()
    // mark the RDD for checkpointing; the data is written once the first action runs
    studentsRDD.checkpoint()
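
    // Aside: Spark also provides rdd.localCheckpoint(), which truncates the lineage using
    // executor-local storage. It is faster than a reliable checkpoint but not fault-tolerant,
    // so it is not a substitute for checkpoint() when the data must survive executor loss.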


    // number of students per class
    val clazzNum: RDD[(String, Int)] = studentsRDD.map(stu => (stu._5, 1)).reduceByKey(_ + _)

    clazzNum.foreach(println)
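
    // Minimal sanity check: the first action above has run, so the separate checkpoint job
    // should have executed by now. isCheckpointed and getCheckpointFile are public RDD APIs.
    println(s"isCheckpointed = ${studentsRDD.isCheckpointed}")
    println(s"checkpoint file = ${studentsRDD.getCheckpointFile}")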

    // number of students per gender
    val genderNum: RDD[(String, Int)] = studentsRDD.map(stu => (stu._4, 1)).reduceByKey(_ + _)

    genderNum.foreach(println)


    // number of students per age
    val ageNumRDD: RDD[(Int, Int)] = studentsRDD.map(stu => (stu._3, 1)).reduceByKey(_ + _)
    ageNumRDD.foreach(println)
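
    // stop the SparkContext to release resources
    sc.stop()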

  }
}
