package com.doit.spark.day03

import com.doit.spark.day01.utils.SparkUtil
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD

/**
 * @DATE 2022/1/5/15:10
 * @Author MDK
 * @Version 2021.2.2
 * */
object C08_分区器验证 {

  /**
   * Demonstrates Spark partitioning:
   *  1. the default `HashPartitioner` that `groupByKey` installs, and
   *  2. a custom `HashPartitioner` subclass supplied to `partitionBy`.
   *
   * Prints each partition's contents via `glom()` so the key-to-partition
   * assignment is visible.
   */
  def main(args: Array[String]): Unit = {
    val sc = SparkUtil.getSc
    // Explicitly request 2 input partitions; without the argument the count
    // defaults to the scheduler's parallelism (number of available cores).
    val rdd = sc.makeRDD(List[(String, Int)]("a" -> 1, "b" -> 2, ("c", 3), "d" -> 4), 2)

    /*
     * groupByKey returns a new RDD whose partitioner defaults to HashPartitioner.
     * Called with no argument it reuses the parent's partition count (2 here),
     * producing for example:
     *   List((d,CompactBuffer(4)), (b,CompactBuffer(2)))
     *   List((a,CompactBuffer(1)), (c,CompactBuffer(3)))
     */
    val rdd2: RDD[(String, Iterable[Int])] = rdd.groupByKey()
    val resRDD: RDD[List[(String, Iterable[Int])]] = rdd2.glom().map(_.toList)
    resRDD.foreach(println)
//    rdd.groupByKey().glom().map(_.toList).foreach(println)

    // Custom partitioner: same hash-mod scheme HashPartitioner uses, spelled
    // out for demonstration. The previous asInstanceOf[String] downcast was
    // unsafe (ClassCastException for non-String keys, NPE for null keys);
    // hashCode is defined on Any, so no cast is needed, and null keys are
    // routed to partition 0 exactly as Spark's HashPartitioner does.
    val resRDD2: RDD[(String, Int)] = rdd.partitionBy(new HashPartitioner(2) {
      override def getPartition(key: Any): Int = key match {
        case null => 0
        case k    => Math.abs(k.hashCode % numPartitions)
      }
    })
    resRDD2.glom().map(_.toList).foreach(println)

    // Release the SparkContext so the application shuts down cleanly
    // (the original leaked it).
    sc.stop()
  }
}
