package com.doitedu.core2

import com.doitedu.utils.SparkUtil
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD

/**
 * @Date: 22.7.7 
 * @Author: HANGGE
 * @qq: 598196583
 * @Tips: 学大数据 ,到多易教育
 * @Description:
 */

/** Immutable demo record; as a case class it extends Serializable, so Spark can
  * ship instances through closures and shuffle them to disk without extra work.
  *
  * @param id   teacher identifier
  * @param name teacher display name
  */
final case class Teacher(id: Int, name: String)
/** Demonstrates Spark serialization: `Teacher` instances created inside a map
  * closure must be serializable so they can be written to disk during the
  * shuffle triggered by `groupBy`. Case classes satisfy this automatically.
  */
object C10_序列化 {
  def main(args: Array[String]): Unit = {
    val sc = SparkUtil.getSparkContext("序列化")

    // Two partitions so the groupBy below actually performs a shuffle.
    val rdd = sc.parallelize(List(1, 2, 3, 4), 2)

    // Pair each element with a Teacher built inside the closure; the instance
    // lives on the executor and must cross the shuffle boundary.
    val rdd2 = rdd.map(e => {
      val teacher = Teacher(1, "雨哥")
      (e, teacher)
    })

    // The shuffle serializes Teacher to disk; in local mode the executor-side
    // println output is visible on the driver console.
    rdd2.groupBy(_._1 > 2).foreach(println)

    // Side-effecting 0-arity method: call with parentheses.
    sc.stop()
  }
}
