package com.shujia.youhua

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

object CacheDemo {
  /**
   * Demonstrates RDD persistence: cache an RDD that is consumed by more than
   * one action so Spark does not recompute the whole lineage for each job.
   */
  def main(args: Array[String]): Unit = {
    val ss: SparkSession = SparkSession.builder()
      .master("local")
      .appName("缓存，避免使用重复的RDD")
      .getOrCreate()
    val sc: SparkContext = ss.sparkContext

    val lineRDD: RDD[String] = sc.textFile("spark/data/students.txt")
    // Parse each CSV line into an (id, name, age, gender, clazz) tuple.
    // NOTE(review): rows that do not split into exactly 5 fields would hit a
    // MatchError here — assumes the input file is well-formed.
    val stuArrRDD: RDD[(String, String, Int, String, String)] = lineRDD.map(_.split(","))
      .map {
        case Array(id: String, name: String, age: String, gender: String, clazz: String) => (id, name, age.toInt, gender, clazz)
      }

    // Persist the RDD that both jobs below reuse.
    // An RDD's storage level may be assigned only ONCE; calling persist() again
    // with a different level throws UnsupportedOperationException, so pick
    // exactly one of the options below:
    //   stuArrRDD.cache()                               // MEMORY_ONLY (default), ~238.4 KB
    //   stuArrRDD.persist(StorageLevel.MEMORY_ONLY_SER) // serialized, ~57.4 KB
    //   stuArrRDD.persist(StorageLevel.DISK_ONLY)
    stuArrRDD.persist(StorageLevel.MEMORY_AND_DISK_SER) // spills to disk if memory is short

    // Requirement 1: number of students per class (5th tuple field).
    val resRDD: RDD[(String, Int)] = stuArrRDD.groupBy(_._5)
      .map((kv: (String, Iterable[(String, String, Int, String, String)])) => {
        (kv._1, kv._2.size) // count directly; no need to materialize a List first
      })
    resRDD.foreach(println)

    // Requirement 2: number of students per gender (4th tuple field).
    val resRDD2: RDD[(String, Int)] = stuArrRDD.groupBy(_._4)
      .map((kv: (String, Iterable[(String, String, Int, String, String)])) => {
        (kv._1, kv._2.size)
      })
    resRDD2.foreach(println)

    // Keep the driver alive so the Spark web UI remains reachable; sleeping
    // parks the thread instead of busy-spinning a CPU core like while(true){}.
    Thread.sleep(Long.MaxValue)
  }
}
