package com.shujia.youhua

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

object KryoDemo {

  /**
   * Demo: shrinking cached-RDD memory footprint with Kryo serialization.
   *
   * Reads students.txt, builds a reused `RDD[Student]`, persists it with a
   * serialized storage level, and runs two counting jobs over it. The driver
   * is then kept alive so the Spark web UI (localhost:4040) can be inspected.
   */
  def main(args: Array[String]): Unit = {
    val ss: SparkSession = SparkSession.builder()
      // Use Kryo instead of the default Java serialization (smaller payloads, faster).
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Custom registrator class telling Kryo which classes to register.
      .config("spark.kryo.registrator", "com.shujia.youhua.CustomKryo")
      .master("local")
      .appName("缓存，避免使用重复的RDD")
      .getOrCreate()
    val sc: SparkContext = ss.sparkContext

    val lineRDD: RDD[String] = sc.textFile("spark/data/students.txt")
    // NOTE(review): a line that does not split into exactly 5 fields throws a
    // MatchError, and a non-numeric age throws NumberFormatException. Acceptable
    // for a demo; validate input before using this in production.
    val stuArrRDD: RDD[Student] = lineRDD.map(_.split(","))
      .map {
        case Array(id: String, name: String, age: String, gender: String, clazz: String) =>
          Student(id, name, age.toInt, gender, clazz)
      }

    // Persist the RDD that both jobs below reuse.
    // cache() == MEMORY_ONLY, deserialized objects (~238.3 KiB observed here).
    // MEMORY_ONLY_SER stores serialized bytes: 55.7 KiB with Java serialization,
    // 43.0 KiB once Kryo is enabled above.
    stuArrRDD.persist(StorageLevel.MEMORY_ONLY_SER)
    //    stuArrRDD.persist(StorageLevel.DISK_ONLY)
    //    stuArrRDD.persist(StorageLevel.MEMORY_AND_DISK_SER) // spills to disk when memory is full

    // Requirement 1: number of students per class.
    // map + reduceByKey shuffles only (key, count) pairs; the original
    // groupBy(...).map(_.toList.size) shuffled entire Student records and
    // materialized a List just to take its size.
    val resRDD: RDD[(String, Int)] = stuArrRDD
      .map(s => (s.clazz, 1))
      .reduceByKey(_ + _)
    resRDD.foreach(println)

    // Requirement 2: number of students per gender.
    val resRDD2: RDD[(String, Int)] = stuArrRDD
      .map(s => (s.gender, 1))
      .reduceByKey(_ + _)
    resRDD2.foreach(println)

    // Keep the driver alive so the Spark web UI stays reachable.
    // Sleep instead of busy-spinning: the original `while (true) {}` pinned a CPU core.
    Thread.sleep(Long.MaxValue)
  }
}

/**
 * Immutable student record parsed from one CSV line of students.txt.
 * Marked `final`: case classes should not be extended (equals/copy semantics break).
 * Presumably registered for Kryo in `com.shujia.youhua.CustomKryo` — confirm there.
 *
 * @param id     student id (first CSV field)
 * @param name   student name
 * @param age    age, parsed from the third CSV field via `toInt`
 * @param gender gender field as found in the source file
 * @param clazz  class name ("clazz" avoids the `class` keyword)
 */
final case class Student(id: String, name: String, age: Int, gender: String, clazz: String)
