package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

/**
 * Demonstrates the effect of Kryo serialization on the size of a cached RDD.
 *
 * Observed cache sizes for the same student data set:
 *  - no serialization (MEMORY_ONLY):            ~280 KB
 *  - default Java serialization (MEMORY_ONLY_SER): ~55 KB
 *  - Kryo serialization (MEMORY_ONLY_SER):        ~43 KB
 *
 * Spark SQL already uses efficient (Tungsten/Kryo-based) encoding internally;
 * plain RDDs do not, so Kryo must be enabled explicitly as done below.
 */
object Demo7Kyyo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      // Use Kryo instead of default Java serialization for cached/shuffled data.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Custom registrator that registers our classes with Kryo.
      // NOTE(review): class name says "Demo8KryoRegister" while this file is
      // Demo7Kyyo — confirm the registrator class actually exists at this path.
      .config("spark.kryo.registrator", "com.shujia.spark.opt.Demo8KryoRegister")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

    /**
     * Parse each CSV line into a Student object.
     * Expected format per line: id,name,age,gender,clazz
     */
    val stuRDD: RDD[Student] = studentsRDD.map(line => {
      val fields: Array[String] = line.split(",")
      Student(fields(0), fields(1), fields(2).toInt, fields(3), fields(4))
    })

    // Cache the parsed RDD in serialized form; with Kryo enabled above this is
    // the most compact in-memory representation (~43 KB vs ~280 KB raw).
    stuRDD.persist(StorageLevel.MEMORY_ONLY_SER)

    // Count students per class (first action: triggers parsing + caching).
    stuRDD
      .map(stu => (stu.clazz, 1))
      .reduceByKey(_ + _)
      .map {
        case (clazz: String, num: Int) =>
          s"$clazz\t$num"
      }
      .foreach(println)

    // Count students per gender (second action: served from the cached RDD).
    stuRDD
      .map(stu => (stu.gender, 1))
      .reduceByKey(_ + _)
      .map {
        case (gender: String, num: Int) =>
          s"$gender\t$num"
      }
      .foreach(println)

    // Keep the driver alive so the Spark web UI (http://localhost:4040) can be
    // inspected. Sleep instead of an empty busy-wait, which would pin a CPU
    // core at 100% while doing nothing.
    while (true) {
      Thread.sleep(10000)
    }

  }

  /** Immutable record for one student row of data/students.txt. */
  case class Student(id: String, name: String, age: Int, gender: String, clazz: String)
}
