package flink_p1

import org.apache.flink.api.common.functions.Partitioner
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment

object FlinkTest_11_Partitioner {

  /**
   * Demonstrates Flink's stream partitioning strategies by writing the same
   * source stream (the numbers 1..10, generated with parallelism 2) to disk
   * once per strategy, so the resulting output files show how each strategy
   * distributes records across downstream subtasks.
   */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Source: 10 records (1..10) produced by 2 parallel source subtasks.
    val stream = env.generateSequence(1, 10).setParallelism(2)

    /**
     * No explicit partitioning: the 10 records are split evenly across
     * all downstream partitions.
     */
    stream.writeAsText("./data/stream1").setParallelism(2)


    /**
     * shuffle: each record goes to a randomly chosen downstream channel.
     * Internally: random.nextInt(numberOfChannels)
     */
    stream.shuffle.writeAsText("./data/stream2").setParallelism(4)

    /**
     * rebalance: deterministic round-robin over all downstream channels
     * (similar distribution to shuffle, but rotating instead of random).
     * Internally: nextChannelToSendTo = (nextChannelToSendTo + 1) % numberOfChannels
     */
    stream.rebalance.writeAsText("./data/stream3").setParallelism(4)

    /**
     * rescale: round-robin, but only within the local subset of downstream
     * channels (cheaper than rebalance — no cross-node traffic).
     *  if (++nextChannelToSendTo >= numberOfChannels) {
     *  nextChannelToSendTo = 0;
     *  }
     *  return nextChannelToSendTo;
     */
    stream.rescale.writeAsText("./data/stream4").setParallelism(4)


    // broadcast: every record is replicated to EVERY downstream partition.
    stream.broadcast.writeAsText("./data/stream5").setParallelism(4)


    /**
     * global: all records are sent to the first downstream partition only,
     * preferring a subtask on the current node.
     * Internally: return 0
     */
    stream.global.writeAsText("./data/stream6").setParallelism(4)


    /**
     * forward: one-to-one forwarding, so upstream and downstream must have
     * the SAME parallelism (analogous to a narrow dependency in Spark).
     */
    stream.forward.writeAsText("./data/stream7").setParallelism(2)


    /**
     * keyBy: records are distributed by the hash of the key modulo the
     * downstream parallelism.
     * NOTE: position-based keyBy(0) is deprecated in newer Flink versions;
     * prefer a key-selector function (e.g. keyBy(_._1)) when upgrading.
     */
    stream.map((_, 1)).keyBy(0).writeAsText("./data/stream8").setParallelism(2)


    /**
     * Custom partitioner: routes each record to partition (key mod numPartitions).
     * Math.floorMod is used instead of `%` so the result is always in
     * [0, numPartitions) even for negative keys, and the modulo is taken on
     * the full Long key before narrowing to Int (plain `key.toInt` would
     * silently truncate large keys).
     */
    class CustomPartitioner extends Partitioner[Long] {
      override def partition(key: Long, numPartitions: Int): Int = {
        Math.floorMod(key, numPartitions.toLong).toInt
      }
    }

    stream.map((_, 1)).partitionCustom(new CustomPartitioner(), 0).writeAsText("./data/stream9").setParallelism(4)


    env.execute()


    /**
     * Typical use cases:
     * shuffle/rebalance: even out skewed data and raise effective parallelism.
     */
  }

}
