package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

object Demo16Catch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Action算子演示")
    val context = new SparkContext(conf)
    // Set the checkpoint directory; in production this would be an HDFS path.
    context.setCheckpointDir("spark/data/checkpoint")
    //====================================================

    val linesRDD: RDD[String] = context.textFile("spark/data/students.csv")

    val splitRDD: RDD[Array[String]] = linesRDD.map(_.split(","))

    // Parse each row into a 5-field tuple (id, name, age, gender, clazz).
    // NOTE(review): this match is partial — any line that does not split into
    // exactly 5 fields will throw a MatchError at runtime; acceptable for a
    // demo, but real code should filter or use a default case.
    val studentsRDD: RDD[(String, String, String, String, String)] = splitRDD.map {
      case Array(id: String, name: String, age: String, gender: String, clazz: String) =>
        (id, name, age, gender, clazz)
    }

    /**
     * Cache vs checkpoint:
     * cache()/persist() keep RDD partitions in memory (and/or on local disk)
     * only for the lifetime of the application — once the program exits, the
     * cached data is gone.
     *
     * checkpoint() durably persists the RDD's data (e.g. to HDFS) so it
     * survives the application; it requires setCheckpointDir to be configured.
     *
     * IMPORTANT: checkpointing runs as a *separate job* after the first action
     * on this RDD. Without persisting first, the entire lineage (read file,
     * split, parse) would be recomputed just to write the checkpoint. The
     * Spark API docs therefore strongly recommend persisting before
     * checkpointing — hence the cache() call below.
     */
    studentsRDD.cache() // default storage level: MEMORY_ONLY
    // Alternative with explicit storage level:
    // studentsRDD.persist(StorageLevel.MEMORY_AND_DISK)
    studentsRDD.checkpoint()

    // Count students per class.
    // (saveAsTextFile fails if the output directory already exists.)
    val clazzKVRDD: RDD[(String, Int)] = studentsRDD.map {
      case (_, _, _, _, clazz: String) => (clazz, 1)
    }
    val clazzNumRDD: RDD[(String, Int)] = clazzKVRDD.reduceByKey(_ + _)
    clazzNumRDD.saveAsTextFile("spark/data/clazz_num")

    // Count students per gender (second action — reuses the cached/checkpointed data).
    val genderKVRDD: RDD[(String, Int)] = studentsRDD.map {
      case (_, _, _, gender: String, _) => (gender, 1)
    }
    val genderNumRDD: RDD[(String, Int)] = genderKVRDD.reduceByKey(_ + _)
    genderNumRDD.saveAsTextFile("spark/data/gender_num")

    // Uncomment to keep the app alive for inspecting the Spark UI:
    // while (true) {}

    // Release all Spark resources (executors, UI, scheduler).
    context.stop()
  }
}
