package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

/**
 * Demonstrates RDD caching/persistence: when the same RDD feeds multiple
 * actions, persisting it avoids re-reading and re-computing the lineage
 * for every job.
 *
 * Equivalent APIs at each layer:
 *   rdd: rdd.cache() / rdd.persist(level)
 *   df : df.cache()
 *   sql: CACHE TABLE student / UNCACHE TABLE student
 */
object Demo1Cache {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // Each line: id,name,age,gender,clazz (assumed schema — TODO confirm
    // against data/students.txt; the field indices below depend on it).
    val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

    // cache() would use the default MEMORY_ONLY level:
    //studentsRDD.cache()

    // MEMORY_AND_DISK_SER: spill to disk when memory is full, and serialize
    // each partition into a byte array to reduce memory footprint (at the
    // cost of deserialization CPU on each access).
    studentsRDD.persist(StorageLevel.MEMORY_AND_DISK_SER)

    /**
     * Job 1: count students per class.
     * BUG FIX: this previously used field index 3 (gender), which made it
     * identical to the gender-count job below; class is field index 4.
     */
    studentsRDD
      .map(stu => (stu.split(",")(4), 1))
      .reduceByKey(_ + _)
      .map {
        case (clazz: String, num: Int) =>
          s"$clazz\t$num"
      }
      .saveAsTextFile("data/cache/clazz_num")

    /**
     * Job 2: count students per gender (field index 3).
     * Because studentsRDD is persisted, this job reads the cached
     * partitions instead of re-scanning the text file.
     */
    studentsRDD
      .map(stu => (stu.split(",")(3), 1))
      .reduceByKey(_ + _)
      .map {
        case (gender: String, num: Int) =>
          s"$gender\t$num"
      }
      .saveAsTextFile("data/cache/gender_num")

    // Release the cached partitions now that both jobs are done.
    studentsRDD.unpersist()

    // Keep the driver alive so the Spark web UI stays inspectable.
    // Previously `while (true) {}`, which busy-spins a full CPU core;
    // sleeping blocks the thread with no CPU cost.
    Thread.sleep(Long.MaxValue)
  }

}
