package com.fwmagic.spark.core.cases.groupwithtopn

import org.apache.spark.Partitioner
import scala.collection.mutable.HashMap

/**
  * Custom partitioner (Spark's default is HashPartitioner).
  * Extends the Partitioner abstract class so that every partition
  * holds the records of exactly one subject.
  *
  * @param subject the distinct subject names; each name's index in
  *                this array becomes its partition id
  */

class SubjectPartitioner2(val subject: Array[String]) extends Partitioner {

    // Partitioning rule: subject name -> partition index.
    // Built once at construction time; an immutable Map with Scala Int
    // values replaces the mutable HashMap[String, Integer] + index loop
    // (no boxing, no mutation after construction).
    private val rules: Map[String, Int] = subject.zipWithIndex.toMap

    // One partition per subject.
    override def numPartitions: Int = subject.length

    /**
      * Returns the partition id for the given key.
      * This method is invoked on the Executor side for every record.
      *
      * @param key expected to be a (subject, teacher, count) tuple; only
      *            the first element (the subject) selects the partition
      * @return the partition index registered for the key's subject
      */
    override def getPartition(key: Any): Int = key match {
        // Pattern match instead of asInstanceOf: only the subject
        // (first tuple element) is needed to choose the partition.
        case (sub: String, _, _) => rules(sub)
    }
}