package cn.tedu.batch.transformation

import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.scala.ExecutionEnvironment

/**
 * @author Amos
 * @date 2022/5/22
 */

/**
 * Demonstrates `partitionByHash` on a batch `DataSet`: records are first
 * processed at parallelism 5 without any explicit partitioning, then
 * repartitioned by the hash of the first tuple field, and finally each
 * record is tagged with the index of the subtask that processed it so
 * the effect of hash partitioning is visible in the printed output.
 */
object BatchPartitionByHashDemo {
  def main(args: Array[String]): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    import org.apache.flink.api.scala._

    // Sample (key, value) pairs; several records share a key, so hash
    // partitioning should route them to the same subtask.
    val input: DataSet[(Int, String)] = env.fromCollection(
      List((1, "a"), (1, "b"), (1, "c"), (2, "a"), (2, "b"), (3, "a"), (3, "b"), (3, "c"), (4, "a"), (4, "a"),
        (5, "a"), (5, "a"))
    )

    // Identity map at parallelism 5 — shows where records land before
    // any explicit partitioning is applied.
    val unpartitioned = input.map(rec => rec).setParallelism(5)

    // Repartition by the hash of field 0, then pair every record with
    // the index of the subtask that handles it after repartitioning.
    val tagged = unpartitioned
      .partitionByHash(0)
      .map(new RichMapFunction[(Int, String), (Int, (Int, String))] {
        override def map(record: (Int, String)): (Int, (Int, String)) =
          (getRuntimeContext.getIndexOfThisSubtask, record)
      })

    tagged.print()
  }
}
