package day02.operator.transformation

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
 * @Author wsl
 * @Description Partitioning of (k, v) pair RDDs. The data type is (k, v).
 * hash: HashPartitioner (default; may cause data skew)
 * range: RangePartitioner
 * custom: user-defined Partitioner (see MyPartitioner below)
 */
object KeyValue_PartitionBy {

  /**
   * Demonstrates repartitioning a pair RDD with a custom [[Partitioner]]
   * and printing each record tagged with the index of its partition.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("rdd").setMaster("local[*]")
    val sparkContext = new SparkContext(sparkConf)

    val pairs = Seq((1, "aaa"), (2, "bbb"), (3, "ccc"), (4, "ddd"), (5, "eee"))

    sparkContext
      .makeRDD(pairs, 3)
      // .partitionBy(new HashPartitioner(2)) // drawback: can lead to data skew
      .partitionBy(new MyPartitioner(2))
      .mapPartitionsWithIndex { (partitionIndex, records) =>
        // Tag every record with the partition it landed in, for inspection.
        records.map(record => (partitionIndex, record))
      }
      .collect()
      .foreach(println)

    sparkContext.stop()
  }
}

//自定义分区器
//自定义分区器 — Custom partitioner: routes Int keys by parity (even -> 0, odd -> 1).
class MyPartitioner(num: Int) extends Partitioner {

  // Fail fast on an invalid partition count instead of failing later inside Spark.
  require(num > 0, s"Number of partitions must be positive but found $num.")

  /** Number of partitions this partitioner produces. */
  override def numPartitions: Int = num

  /**
   * Returns the partition index in [0, numPartitions) for the given key.
   * Custom rule: Int keys are split by parity — even keys go to partition 0,
   * odd keys to partition 1; non-Int keys go to partition 0.
   */
  override def getPartition(key: Any): Int = key match {
    // math.min keeps the result in range when num == 1 (the original
    // hard-coded 1 would be an out-of-range partition index in that case).
    case k: Int if k % 2 != 0 => math.min(1, num - 1)
    case _                    => 0
  }

  // equals/hashCode pair lets Spark recognize when two RDDs share the same
  // partitioning and skip an unnecessary shuffle.
  override def equals(other: Any): Boolean = other match {
    case that: MyPartitioner => that.numPartitions == numPartitions
    case _                   => false
  }

  // Must be consistent with equals: equal partitioners hash identically.
  override def hashCode(): Int = numPartitions
}