package com.hngy.scala.stream.transformation

import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.api.scala._

/**
  * Demonstrates the use of DataStream partitioning strategies
  * (shuffle, rebalance, rescale, broadcast, custom partitioner).
  */
object StreamPartitionOpScala {

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // NOTE: by default, operator parallelism in a Flink job is taken from the
    // number of CPU cores on the current machine. fromCollection, however,
    // always runs with parallelism 1 (see its source code).
    val text = env.fromCollection(Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))

    // shuffle partitioning
    //shuffleOp(text)

    // rebalance partitioning
    //rebalanceOp(text)

    // rescale partitioning
    //rescaleOp(text)

    // broadcast partitioning: this prints 40 records in total,
    // because the print operator runs with parallelism 4
    //broadcastOp(text)

    // custom partitioning: route records by parity (odd/even).
    // Although print has parallelism 4, the custom partitioner
    // only ever sends data to 2 of the parallel instances.
    custormPartitionOp(text)

    env.execute("StreamPartitionOpScala")
  }

  /** Custom partitioning: distributes records by parity via MyPartitionerScala. */
  def custormPartitionOp(text: DataStream[Int]) = {
    val mapped = text.map(x => x).setParallelism(2)
    // the KeySelector overload is the one officially recommended by Flink
    val partitioned = mapped.partitionCustom(new MyPartitionerScala, x => x)
    partitioned.print().setParallelism(4)
  }

  /** Broadcast partitioning: every record is sent to all downstream subtasks. */
  def broadcastOp(text: DataStream[Int]) = {
    val mapped = text.map(x => x).setParallelism(2)
    mapped.broadcast.print().setParallelism(4)
  }

  /** Rescale partitioning: round-robin within local subtask groups only. */
  def rescaleOp(text: DataStream[Int]) = {
    val mapped = text.map(x => x).setParallelism(2)
    mapped.rescale.print().setParallelism(4)
  }

  /** Rebalance partitioning: global round-robin across all downstream subtasks. */
  def rebalanceOp(text: DataStream[Int]) = {
    val mapped = text.map(x => x).setParallelism(2) // map runs with parallelism 2
    mapped.rebalance.print().setParallelism(4)      // print runs with parallelism 4
  }

  /** Shuffle partitioning: records are distributed uniformly at random. */
  def shuffleOp(text: DataStream[Int]) = {
    // fromCollection is fixed at parallelism 1, so a downstream operator
    // (map here) is needed before the parallelism can be changed
    val mapped = text.map(x => x).setParallelism(2) // map runs with parallelism 2
    mapped.shuffle.print().setParallelism(2)        // print runs with parallelism 2
  }
}
