package com.xbai.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates Spark partitioners. Spark ships with Hash and Range
  * partitioning out of the box (Hash is the default), and users may plug in
  * a custom `Partitioner`. The partitioner directly determines the number of
  * partitions of an RDD, which partition each record lands in after a
  * shuffle, and the number of reduce tasks.
  *
  * @author xbai
  * @Date 2021/1/2
  */
object Spark_Partitioner {

  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf =
      new SparkConf().setMaster("local[*]").setAppName("Partitioner")
    val sc: SparkContext = new SparkContext(sparkConf)

    // A freshly parallelized pair RDD carries no partitioner.
    val pairs: RDD[(Int, Int)] = sc.parallelize(List((1, 1), (2, 2), (3, 3)))
    println(pairs.partitioner) // None

    import org.apache.spark.HashPartitioner
    // Re-partition by key hash into 2 partitions; the RDD now reports one.
    val hashed: RDD[(Int, Int)] = pairs.partitionBy(new HashPartitioner(2))
    println(hashed.partitioner) // Some(org.apache.spark.HashPartitioner@2)

    // Six records spread across 8 partitions, with no partitioner attached.
    val unpartitioned: RDD[(Int, Int)] =
      sc.parallelize(List((1, 3), (1, 2), (2, 4), (2, 3), (3, 6), (3, 8)), 8)
    dumpPartitions(unpartitioned)

    // Hash-partition the same data into 7 partitions and dump the layout
    // again to show how records regrouped by key hash.
    val rehashed: RDD[(Int, Int)] =
      unpartitioned.partitionBy(new HashPartitioner(7))
    dumpPartitions(rehashed)

    sc.stop()
  }

  /** Prints each partition as `index:elem|elem|...`, one line per partition. */
  private def dumpPartitions(rdd: RDD[(Int, Int)]): Unit =
    rdd
      .mapPartitionsWithIndex { (index, iter) =>
        Iterator(index.toString + ":" + iter.mkString("|"))
      }
      .collect()
      .foreach(println)
}
