package com.xbai.spark.core.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
  * `partitionBy` operator demo.
  *
  * `partitionBy` repartitions a pair RDD using the supplied [[Partitioner]].
  * If the RDD's existing partitioner equals the requested one, no work is done;
  * otherwise a ShuffleRDD is produced (i.e. a shuffle occurs).
  *
  * Example: create an RDD with 4 partitions, then repartition it.
  *
  * @author xbai
  * @Date 2020/12/30
  */
object Spark15_PartitionBy {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("partitionBy")
    val sc = new SparkContext(conf)

    try {
      // Pair RDD with 4 initial partitions.
      val rdd: RDD[(Int, String)] = sc.parallelize(Array((1, "a"), (2, "b"), (3, "c"), (4, "d")), 4)
      println("缩减分区前 = " + rdd.partitions.length)

      // Repartition down to 2 partitions with Spark's built-in HashPartitioner;
      // this triggers a shuffle because the partitioner changes.
      val partitionByRDD: RDD[(Int, String)] = rdd.partitionBy(new org.apache.spark.HashPartitioner(2))
      println("缩减分区后 = " + partitionByRDD.partitions.length)

      // Repartition with a user-defined partitioner (3 partitions).
      val myPartitionerRDD: RDD[(Int, String)] = rdd.partitionBy(new MyPartitioner(3))
      myPartitionerRDD.saveAsTextFile("output")
    } finally {
      // Always release driver resources, even if a job above fails.
      sc.stop()
    }
  }
}

/**
  * Custom partitioner.
  *
  * Extends [[Partitioner]] and routes each key to a partition via a
  * non-negative hash-modulo scheme (the same approach Spark's built-in
  * `HashPartitioner` uses), so that all `partitions` buckets are actually
  * usable. The previous implementation returned the constant `1` for every
  * key, which sent all records to a single partition and defeated the
  * declared partition count.
  *
  * @param partitions number of target partitions; must be positive
  */
class MyPartitioner(partitions: Int) extends Partitioner {
  require(partitions > 0, s"Number of partitions ($partitions) must be positive.")

  /** Number of partitions this partitioner produces. */
  override def numPartitions: Int = {
    partitions
  }

  /**
    * Maps a key to a partition index in `[0, partitions)`.
    * `null` keys go to partition 0; `hashCode` may be negative in Scala/Java,
    * so the modulo result is shifted into the non-negative range.
    */
  override def getPartition(key: Any): Int = {
    if (key == null) {
      0
    } else {
      val mod = key.hashCode % partitions
      if (mod < 0) mod + partitions else mod
    }
  }
}
