package com.desheng.bigdata.flink.batch.transformation

import org.apache.flink.api.common.functions.Partitioner
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode

import scala.collection.mutable

/**
  * DataSet (batch) partitioning operations:
  *     hash-based partitioning  -- partitionByHash
  *     range-based partitioning -- partitionByRange
  *     custom partitioning      -- partitionCustom
  */
object _02Transformation4PartitionOps {
    /**
      * Demonstrates the three DataSet repartitioning strategies on a small
      * in-memory collection of [[Worker]] records.
      *
      * FIX: the hash- and range-partition pipelines previously had no data
      * sink. The DataSet API is lazy — a transformation chain without a sink
      * is never executed — so those two mapPartition chains silently did
      * nothing. Each now ends in print(), an eager sink that triggers
      * immediate execution of its pipeline.
      */
    def main(args: Array[String]): Unit = {
        val env = ExecutionEnvironment.getExecutionEnvironment
        env.setParallelism(2)
        // Sample input: six workers spread over four departments.
        val workers:DataSet[Worker] = env.fromCollection(List(
            Worker(1, "吴延俊", 25, "研发部"),
            Worker(2, "宋建华", 26, "产品部"),
            Worker(3, "崔蒙恩", 58, "小卖部"),
            Worker(4, "小岚岚", 48, "小卖部"),
            Worker(5, "成思远", 68, "后勤部"),
            Worker(6, "李向闪", 26, "产品部")
        ))

        println("-----------------partition by hash--------------------")
        // Records with the same hash of "id" land in the same partition.
        workers.partitionByHash("id")
            .mapPartition(partition => {
                val list = partition.toList
                println("partition: " + list)
                list
            })
            .print() // eager sink: executes this pipeline immediately
        println("------------------partition by range-------------------")
        // Records are split into contiguous "age" ranges, one per partition.
        workers.partitionByRange("age")
            .mapPartition(partition => {
                val list = partition.toList
                println("partition: " + list)
                list
            })
            .print() // eager sink: executes this pipeline immediately
        println("------------------partition by custom-------------------")
        // Distinct department names drive both the partitioner's lookup
        // table and the parallelism (one partition per department).
        val depts = workers.map(w => w.dept).distinct().collect()

        workers.partitionCustom(new DeptPartitioner(depts), worker => worker.dept)
            //setParallelism configures only this operator's DataSet, so each
            //department maps to exactly one partition.
            .setParallelism(depts.size)
            .mapPartition(partition => {
                val list = partition.toList
                println("partition: " + list)
                list
            })
            .writeAsText("file:/E:/data/out/flink/partition", WriteMode.OVERWRITE)

        // Executes the remaining lazy sink (writeAsText); the print() calls
        // above have already run their own pipelines.
        env.execute("partitionByCustom")
    }
}
case class Worker(id: Int, name: String, age: Int, dept: String)

/**
  * Custom partitioner that routes records by department name, using a
  * dept -> partition-index lookup table built from the distinct list of
  * departments supplied at construction time.
  */
class DeptPartitioner(depts: Seq[String]) extends Partitioner[String] {
    // Department name -> partition index, built once at construction.
    val dept2Index: Map[String, Int] = {
        println("depts: " + depts.toList)
        depts.zipWithIndex.toMap
    }

    /** Returns the partition assigned to `dept`; unknown departments fall back to partition 0. */
    override def partition(dept: String, numPartitions: Int): Int =
        dept2Index.getOrElse(dept, 0)
}
