package com.offcn.bigdata.dataset.transformation


import org.apache.flink.api.common.functions.Partitioner
import org.apache.flink.api.scala._

import scala.collection.mutable

/**
 * @author BigData-LGW
 * @version 1.0
 * @since 2020/12/16
 *
 * Demonstrates custom partitioning of a Flink DataSet: records are routed to
 * one partition per distinct province via a user-defined [[Partitioner]].
 */
object PartitionByCustom {
    /**
     * Entry point: builds a small in-memory DataSet of `Person` records and
     * repartitions it so that every record lands in the partition assigned to
     * its province, then prints each partition's contents.
     */
    def main(args: Array[String]): Unit = {
        val env = ExecutionEnvironment.getExecutionEnvironment
        // Sample data; Person is a project case class (id, name, age, gender, province).
        val persons = env.fromCollection(List(
            Person(1, "刘国伟", 18, 0, "辽宁"),
            Person(2, "马惠", 19, 1, "辽宁"),
            Person(3, "小涛涛", 29, 0, "辽宁"),
            Person(4, "刘鑫", 20, 0, "贵州"),
            Person(5, "郑朝义", 22, 0, "贵州"),
            Person(6, "韩香彧", 20, 1, "内蒙古"),
            Person(7, "佟凯", 22, 1, "内蒙古"),
            Person(8, "刘照路", 30, 1, "辽宁")
        ))
        // Distinct provinces drive the partition count: one partition per province.
        // (Simpler than the former map-to-tuple + distinct(0) + re-project dance.)
        val provinces: Seq[String] = persons.map(_.province)
            .distinct()
            .collect()
        val numPartitions = provinces.length
        // Route every record through the custom partitioner, keyed by province.
        val partitioned = persons
            .partitionCustom(new MyGroupedPartitioner(provinces), person => person.province)
            .setParallelism(numPartitions)
        // Materialize each partition and print it, so records sharing a province
        // can be seen grouped together. (Message fixed: this is partitionCustom,
        // not partitionByRange.)
        partitioned.mapPartition(ps => {
            val list = ps.toList
            println("partitionCustom内容：" + list.mkString("[", ", ", "]"))
            list
        }).print()
    }
}
/**
 * Flink [[Partitioner]] that assigns each province name to a fixed partition:
 * the province's position in the `province` sequence supplied at construction.
 *
 * @param province the distinct province names; index i maps to partition i
 */
class MyGroupedPartitioner(province: Seq[String]) extends Partitioner[String]{
    // Immutable lookup from province name to its partition index.
    // Idiomatic replacement for the previous mutable.Map + index loop;
    // produces the same Map (later duplicates would win in both versions).
    val province2Index: Map[String, Int] = province.zipWithIndex.toMap

    /**
     * @param key           the record's province
     * @param numPartitions total partitions available (unused; indices are
     *                      assumed to fit because parallelism is set to
     *                      province count by the caller)
     * @return the partition index for `key`; unknown provinces fall back to 0
     */
    override def partition(key: String, numPartitions: Int): Int =
        province2Index.getOrElse(key, 0)
}