package com.study.spark.scala.rdd

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

import scala.collection.mutable

object PartitionDemo {

  /**
   * Demonstrates a custom Spark [[Partitioner]]: letter keys are routed to
   * fixed partitions, counts are aggregated per key with `reduceByKey`, and
   * the top-2 entries (by count) of each partition are collected and printed.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Partition Demo").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Same eight (letter, 1) pairs as before, built from the raw letters.
    val letters = Seq("a", "b", "c", "d", "c", "b", "a", "a")
    val rdd = sc.parallelize(letters.map(letter => (letter, 1)))

    // Aggregate counts per key using the custom 4-way partitioner, then keep
    // the two highest-count entries within each partition.
    val topPerPartition = rdd
      .reduceByKey(new MyPartitioner(4), _ + _)
      .mapPartitions { partition =>
        partition.toList.sortBy(_._2).reverse.take(2).iterator
      }
      .collect()

    println(topPerPartition.toBuffer)

    sc.stop()
  }
}

/**
 * A custom partitioner that routes a fixed set of string keys ("a".."d") to
 * dedicated partitions and falls back to non-negative hash-modulo for any
 * other key.
 *
 * Fixes over the original:
 *  - `getPartition` no longer throws `NoSuchElementException` for unknown
 *    keys (a Partitioner must return a valid id for any key).
 *  - A rule whose partition id is out of range for `nums` is ignored instead
 *    of crashing the shuffle.
 *  - `equals`/`hashCode` are defined so Spark can recognize co-partitioned
 *    RDDs and skip unnecessary shuffles.
 *
 * @param nums number of partitions in the resulting RDD; must be positive
 */
class MyPartitioner(nums: Int) extends Partitioner {
  require(nums > 0, s"Number of partitions ($nums) must be positive")

  // Fixed routing rules: each known key maps to a dedicated partition.
  private val rules = mutable.HashMap("a" -> 0, "b" -> 1, "c" -> 2, "d" -> 3)

  /** Number of partitions of the next RDD. */
  override def numPartitions: Int = nums

  /**
   * Computes the partition id for `key`. Known keys use the fixed rules
   * (only when the rule's id is valid for `nums`); all other keys — including
   * null — use non-negative hash modulo, so the result is always in
   * [0, numPartitions).
   */
  override def getPartition(key: Any): Int = {
    // String.valueOf is null-safe, unlike key.toString.
    rules.get(String.valueOf(key)).filter(_ < nums).getOrElse(nonNegativeMod(key))
  }

  // Maps an arbitrary key's hashCode into [0, nums); handles null and
  // negative hash codes (Java's % can return negative values).
  private def nonNegativeMod(key: Any): Int = {
    val raw = if (key == null) 0 else key.hashCode % nums
    if (raw < 0) raw + nums else raw
  }

  // Partitioners should be comparable by value: Spark uses equality to detect
  // that two RDDs are co-partitioned and avoid a redundant shuffle.
  override def equals(other: Any): Boolean = other match {
    case that: MyPartitioner => that.numPartitions == numPartitions
    case _                   => false
  }

  override def hashCode(): Int = numPartitions
}