package com.shujia.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

/**
 * Demonstrates RDD caching: `stuRDD` feeds two independent jobs
 * (count per class, count per gender), so persisting it avoids
 * re-reading and re-parsing the text file for the second job.
 */
object Demo1Cache {
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", "1")
      .master("local[2]")
      .appName("缓存优化")
      .getOrCreate()

    val sparkContext: SparkContext = sparkSession.sparkContext

    val stuRDD: RDD[String] = sparkContext.textFile("spark/data/students.txt")

    /**
     * The purpose of caching is to avoid recomputing the lineage from the
     * first RDD every time a job runs; cache only RDDs that are reused.
     * - cache(): cannot choose a storage level (always MEMORY_ONLY)
     * - persist(): accepts an explicit storage level
     */
    // stuRDD.cache() // default storage level is MEMORY_ONLY
    stuRDD.persist(StorageLevel.MEMORY_ONLY_SER)

    /**
     * Count the number of students per class.
     * NOTE(review): the match is non-exhaustive — a line that does not split
     * into exactly 5 comma-separated fields throws MatchError; assumes the
     * input file is well-formed. TODO confirm, or use collect{} to drop bad rows.
     */
    val resRDD: RDD[(String, Int)] = stuRDD.map(_.split(",") match {
      case Array(_, _, _, _, clazz: String) =>
        (clazz, 1)
    }).reduceByKey(_ + _)

    resRDD.foreach(println)

    /**
     * Count the number of students per gender (second job over the same
     * cached RDD — this is where the persist() above pays off).
     */
    val resRDD2: RDD[(String, Int)] = stuRDD.map(_.split(",") match {
      case Array(_, _, _, gender: String, _) =>
        (gender, 1)
    }).reduceByKey(_ + _)

    resRDD2.foreach(println)

    // Keep the driver alive so the Spark web UI (http://localhost:4040)
    // stays inspectable. A sleeping wait replaces the original busy-spin
    // `while (true) {}`, which needlessly pegged a CPU core.
    Thread.sleep(Long.MaxValue)
  }
}
