package com.xiaohu.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

object Demo7Kryo {

  /**
   * Demonstrates the memory savings of serialized RDD caching with Kryo.
   * Runs two independent jobs over the same cached `studentsRDD` so the
   * second job's input size (visible in the Spark UI) shows the effect of
   * the chosen cache/serialization strategy.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", "1")
      // Use Kryo instead of default Java serialization for cached/shuffled data.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Register the class Kryo will serialize. NOTE(review): the original set
      // "spark.kryo.registrator" to "com.shujia.opt.Demo8Kryo" — a different
      // package than this file (com.xiaohu.opt) and a name not visible here,
      // almost certainly a copy-paste slip that would fail at startup.
      // Registering the locally-defined Student class directly is safe.
      .config("spark.kryo.classesToRegister", classOf[Student].getName)
      .master("local[2]")
      .appName("缓存优化")
      .getOrCreate()

    val sparkContext: SparkContext = sparkSession.sparkContext

    // Parse each CSV line of students.txt into a Student record.
    val studentsRDD: RDD[Student] = sparkContext.textFile("spark/data/students.txt")
      .map(line => line.split(",") match {
        case Array(id, name, age, gender, clazz) =>
          Student(id, name, age.toInt, gender, clazz)
        case _ =>
          // Fail with a descriptive error instead of an opaque MatchError
          // when a line does not have exactly five fields.
          throw new IllegalArgumentException(s"Malformed student record: $line")
      })

    /**
     * Input size of the second job (from the Spark UI):
     *   - cached without serialization:      238.3 KiB
     *   - cached with default serialization:  65.4 KiB
     *   - cached with Kryo serialization:     43.0 KiB
     */
//    studentsRDD.cache() // default storage level is MEMORY_ONLY
    studentsRDD.persist(StorageLevel.MEMORY_ONLY_SER)

    /**
     * Job 1: count students per class.
     */
    val resRDD: RDD[(String, Int)] = studentsRDD.map((stu: Student) => (stu.clazz, 1)).reduceByKey(_ + _)
    resRDD.foreach(println)

    /**
     * Job 2: count students per gender (re-reads the cached RDD).
     */
    val resRDD2: RDD[(String, Int)] = studentsRDD.map((stu: Student) => (stu.gender, 1)).reduceByKey(_ + _)
    resRDD2.foreach(println)

    // Keep the driver alive so the Spark web UI stays reachable for inspection.
    // The original `while (true) {}` busy-waits and pins a CPU core; sleeping
    // serves the same purpose without burning CPU.
    Thread.sleep(Long.MaxValue)
  }
}

case class Student(id:String,name:String,age:Int,gender:String,clazz:String)