package com.shujia.youhua

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

object Demo7Kryo {

  /**
   * Demonstrates how Kryo serialization shrinks the memory footprint of a
   * serialized RDD cache. Two independent jobs (per-class count, per-gender
   * count) reuse the same persisted RDD; the Spark UI Storage tab shows the
   * cached size under each serializer configuration.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      // Replace the default Java serializer with Kryo (faster, more compact).
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Custom registrator that tells Kryo which classes to register
      // (defined elsewhere in this package as Demo8Kryo).
      .config("spark.kryo.registrator", "com.shujia.youhua.Demo8Kryo")
      .master("local")
      .appName("缓存优化")
      .getOrCreate()

    val sparkContext: SparkContext = sparkSession.sparkContext

    // Parse students.txt (id,name,age,gender,clazz) into Student2 records.
    // `collect` with a guarded partial function skips malformed rows instead
    // of failing the whole job with a MatchError / NumberFormatException,
    // as the original partial `map` would on a bad line.
    val studentsRDD: RDD[Student2] = sparkContext.textFile("spark/data/students.txt")
      .map(_.split(","))
      .collect {
        case Array(id, name, age, gender, clazz) if age.nonEmpty && age.forall(_.isDigit) =>
          Student2(id, name, age.toInt, gender, clazz)
      }

    /**
     * Cached data size consumed by the second job:
     *  - no serialization (plain objects): 238.3 KiB
     *  - default Java serialization:        65.4 KiB
     *  - Kryo serialization:                43.0 KiB
     */
    // studentsRDD.cache() would be MEMORY_ONLY (deserialized objects);
    // MEMORY_ONLY_SER stores serialized bytes, so the Kryo config above
    // actually affects the cached size.
    studentsRDD.persist(StorageLevel.MEMORY_ONLY_SER)

    // Requirement 1: number of students per class.
    val resRDD: RDD[(String, Int)] = studentsRDD.groupBy(_.clazz)
      .map { case (clazz, students) => (clazz, students.size) } // size directly; no toList copy
    resRDD.foreach(println)

    // Requirement 2: number of students per gender (reuses the cached RDD).
    val resRDD2: RDD[(String, Int)] = studentsRDD.groupBy(_.gender)
      .map { case (gender, students) => (gender, students.size) }
    resRDD2.foreach(println)

    // NOTE: mapping to (key, 1) and reduceByKey(_ + _) would shuffle far less
    // data than groupBy (map-side combine); groupBy is kept here because the
    // demo is about comparing cached sizes, not shuffle volume.

    // Keep the driver alive so the Spark UI (http://localhost:4040) can be
    // inspected. Sleeping avoids the original `while (true) {}` busy-wait,
    // which pinned a CPU core at 100%.
    while (true) {
      Thread.sleep(10000L)
    }
  }
}

/**
 * Immutable student record parsed from one line of students.txt.
 *
 * Declared `final`: case classes should not be extended (equals/copy
 * semantics break under inheritance).
 *
 * @param id     student id (kept as String; may carry leading zeros)
 * @param name   student name
 * @param age    age in whole years
 * @param gender gender string exactly as it appears in the source file
 * @param clazz  class name (`clazz` because `class` is a reserved word)
 */
final case class Student2(id: String, name: String, age: Int, gender: String, clazz: String)