package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

object Demo21CheckPoint {

  /**
   * Demonstrates RDD checkpointing combined with caching.
   *
   * Several independent jobs are run against the same mapped RDD to show
   * that, once cached and checkpointed, the source file is not re-read
   * for every action (watch how many times the println fires).
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setAppName("Demo21CheckPoint")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    // A checkpoint directory must be set BEFORE calling checkpoint();
    // in production this is typically an HDFS path for reliability.
    sc.setCheckpointDir("spark/data/ck")

    val stuRDD: RDD[String] = sc.textFile("spark/data/students.txt")

    val stuMapRDD: RDD[String] = stuRDD.map(line => {
      // Side-effecting print: makes it visible how many times the
      // file is actually (re)read across the jobs below.
      println("读取了文件")
      line
    })

    // Cache before checkpointing: the checkpoint job re-runs the lineage
    // from the start (effectively a second read of the file), so caching
    // first lets the checkpoint job read from memory instead.
    stuMapRDD.cache()

    /**
     * checkpoint: like taking a snapshot in VMWare — it persists the state
     * (result) of an RDD at a point in time. Mostly used in Spark Streaming
     * to save program state; HDFS is normally used as the store so the
     * state survives failures.
     *
     * Once the checkpoint completes, the lineage to the parent RDDs is cut.
     */
    stuMapRDD.checkpoint()

    // Count students per gender (column index 3). The same job is run
    // three times on purpose to demonstrate that cache/checkpoint prevents
    // repeated reads of the source file.
    val genderRDD: RDD[(String, Int)] = stuMapRDD.map(line => (line.split(",")(3), 1))
    val genderCnt: RDD[(String, Int)] = genderRDD.reduceByKey(_ + _)
    genderCnt.foreach(println)

    val genderRDD1: RDD[(String, Int)] = stuMapRDD.map(line => (line.split(",")(3), 1))
    val genderCnt1: RDD[(String, Int)] = genderRDD1.reduceByKey(_ + _)
    genderCnt1.foreach(println)

    val genderRDD2: RDD[(String, Int)] = stuMapRDD.map(line => (line.split(",")(3), 1))
    val genderCnt2: RDD[(String, Int)] = genderRDD2.reduceByKey(_ + _)
    genderCnt2.foreach(println)

    // Count students per class (column index 4).
    val clazzRDD: RDD[(String, Int)] = stuMapRDD.map(line => (line.split(",")(4), 1))
    val clazzCnt: RDD[(String, Int)] = clazzRDD.reduceByKey(_ + _)
    clazzCnt.foreach(println)

    // Max age per class. Split each line ONCE instead of twice per record
    // (the original called line.split(",") for both the key and the value).
    val clazzAgeRDD: RDD[(String, Int)] = stuMapRDD.map(line => {
      val cols = line.split(",")
      (cols(4), cols(2).toInt)
    })
    val clazzMaxAge: RDD[(String, Int)] = clazzAgeRDD.reduceByKey(Math.max)
    clazzMaxAge.foreach(println)

    // Release driver/executor resources; the original never stopped the context.
    sc.stop()
  }

}
