package com.shujia.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
  * Demonstrates RDD checkpointing.
  *
  * checkpoint() saves an RDD's data to reliable storage (e.g. HDFS):
  *  1. After the first job finishes, Spark walks the lineage backwards from the
  *     final RDD and marks every RDD on which checkpoint() was called.
  *  2. A separate job is then launched to recompute the marked RDD and write
  *     its data to the checkpoint directory; later jobs read from there instead
  *     of recomputing the full lineage.
  *
  * Calling cache() before checkpoint() lets the extra checkpointing job reuse
  * the cached partitions instead of recomputing them — an efficiency win.
  * Checkpointing is mainly used in Spark Streaming.
  */
object Demo17Checkpoint {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      // Was "map" — a copy-paste leftover from an earlier demo; use this demo's name.
      .setAppName("Demo17Checkpoint")

    val sc = new SparkContext(conf)

    try {
      // Directory where checkpointed RDD data is written
      // (local path here; would be an HDFS path in production).
      sc.setCheckpointDir("spark/data/checkpoint")

      val student: RDD[String] = sc.textFile("spark/data/students.txt")

      // Side-effecting map so we can observe on stdout how many times
      // the lineage is actually recomputed across the jobs below.
      val mapStudent: RDD[String] = student.map(line => {
        println("map----")
        line
      })

      // cache() before checkpoint() so the checkpointing job does not
      // recompute mapStudent from the text file.
      mapStudent.cache()
      mapStudent.checkpoint()

      // Count students per class (column 4 of the CSV is the class name).
      val clazzKVRDD: RDD[(String, Int)] = mapStudent.map(line => (line.split(",")(4), 1))
      val clazzNumRDD: RDD[(String, Int)] = clazzKVRDD.reduceByKey(_ + _)
      clazzNumRDD.foreach(println)

      // Count students per gender (column 3 of the CSV is the gender).
      val genderKVRDD: RDD[(String, Int)] = mapStudent.map(line => (line.split(",")(3), 1))
      val genderNumRDD: RDD[(String, Int)] = genderKVRDD.reduceByKey(_ + _)
      genderNumRDD.foreach(println)

      // Run the gender count again: with cache + checkpoint in place the
      // "map----" lines are not printed again, showing the truncated lineage.
      val genderKVRDD1: RDD[(String, Int)] = mapStudent.map(line => (line.split(",")(3), 1))
      val genderNumRDD1: RDD[(String, Int)] = genderKVRDD1.reduceByKey(_ + _)
      genderNumRDD1.foreach(println)
    } finally {
      // Always release the SparkContext's resources, even if a job fails.
      sc.stop()
    }
  }
}
