package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Immutable record for one row of `data/students.txt`
 * (CSV columns: id, name, age, sex, clazz).
 *
 * Marked `final`: case classes should not be extended — equals/hashCode/copy
 * semantics break under inheritance.
 */
final case class Student(id: String, name: String, age: Int, sex: String, clazz: String)

/**
 * Demonstrates RDD caching: one source RDD is reused by three independent
 * aggregation jobs, so caching it avoids re-reading and re-parsing the file
 * for each job.
 */
object Demo15Cache {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo15Cache")
    val sc = new SparkContext(conf)

    val linesRDD: RDD[String] = sc.textFile("data/students.txt")

    // Parse each CSV line into a Student.
    // `collect` with a guarded pattern skips malformed lines (wrong field
    // count or non-numeric age) instead of failing the whole job with a
    // MatchError/NumberFormatException, as a plain partial `map` would.
    val studentsRDD: RDD[Student] = linesRDD
      .map(_.split(","))
      .collect {
        case Array(id, name, age, sex, clazz) if age.nonEmpty && age.forall(_.isDigit) =>
          Student(id, name, age.toInt, sex, clazz)
      }

    /**
     * Caching
     * When the same RDD is used multiple times, its data can be cached
     * so each reuse does not recompute the lineage from the source file.
     * cache() stores the data in Executor memory by default
     * (equivalent to persist(StorageLevel.MEMORY_ONLY)).
     *
     * Choosing a storage level:
     * 1. MEMORY_ONLY: memory only — use when memory is plentiful and the
     *    data set is not very large.
     * 2. MEMORY_ONLY_SER: serialized (compressed) in memory — trades CPU
     *    time for memory space.
     * 3. MEMORY_AND_DISK_SER: serialized in memory, spilling to disk when
     *    it does not fit.
     */

    studentsRDD.cache()
    //    studentsRDD.persist(StorageLevel.MEMORY_ONLY_SER)

    // Count students per class.
    studentsRDD
      .map(stu => (stu.clazz, 1))
      .reduceByKey(_ + _)
      .foreach(println)

    // Count students per sex.
    studentsRDD
      .map(stu => (stu.sex, 1))
      .reduceByKey(_ + _)
      .foreach(println)

    // Count students per age.
    studentsRDD
      .map(stu => (stu.age, 1))
      .reduceByKey(_ + _)
      .foreach(println)

    // Keep the driver alive so the Spark UI (http://localhost:4040) can be
    // inspected to verify the cached RDD in the Storage tab.
    // Sleep instead of busy-spinning (`while (true) {}`) so the idle driver
    // does not peg a CPU core at 100%.
    while (true) {
      Thread.sleep(1000L)
    }
  }

}
