package com.shujia.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

/**
 * Demonstrates RDD caching: when one RDD feeds multiple Spark jobs,
 * persisting it avoids recomputing its entire lineage for each job.
 */
object Demo1Cache {
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    val sparkContext: SparkContext = sparkSession.sparkContext

    val studentsRDD: RDD[String] = sparkContext.textFile("spark/data/students.csv")

    /**
     * Avoid recomputing a reused RDD: without caching, every action that
     * depends on studentsRDD would re-read and re-parse the source file.
     */
    // Cache the RDD that is consumed by more than one job below.
    // studentsRDD.cache() // equivalent to persist(StorageLevel.MEMORY_ONLY)
    // MEMORY_AND_DISK_SER: stored serialized in memory, spilling to disk when memory is tight.
    studentsRDD.persist(StorageLevel.MEMORY_AND_DISK_SER)

    /**
     * Job 1: count students per class (CSV column index 4).
     */
    studentsRDD.map((line: String) => (line.split(",")(4), 1))
      .reduceByKey(_ + _)
      .saveAsTextFile("spark/data/opt/clazz_num")

    /**
     * Job 2: count students per gender (CSV column index 3).
     *
     * The first job that touches studentsRDD computes it from the source file
     * at full size; this second job reads the cached partitions instead of
     * recomputing the lineage.
     */
    studentsRDD.map((line: String) => (line.split(",")(3), 1))
      .reduceByKey(_ + _)
      .saveAsTextFile("spark/data/opt/gender_num")

    // Keep the application alive so the Spark web UI remains inspectable.
    // Sleep inside the loop: the original empty while(true) body busy-waited
    // and pinned a CPU core at 100%.
    while (true) {
      Thread.sleep(10000)
    }

  }
}
