package com.bigdata.flink.datastream;

import com.bigdata.flink.util.StreamEnvUtil;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demonstrates Flink's physical partitioning strategies on a socket text stream:
 * custom, shuffle, rebalance, rescale, broadcast, and global.
 *
 * <p>Only {@link #custom(DataStreamSource)} is wired into {@link #main(String[])};
 * swap the call to try the other strategies.
 */
public class PartitionerTest {
  public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamEnvUtil.getEnv();
    DataStreamSource<String> socketSource = StreamEnvUtil.getSocketSource(env);
    env.setParallelism(3);

    custom(socketSource);

    env.execute();
  }

  /**
   * Custom partitioning: routes each record to the downstream subtask chosen by
   * the supplied {@link Partitioner}, using the record itself as the key.
   */
  public static void custom(DataStreamSource<String> socketSource) {
    socketSource.partitionCustom(new Partitioner<String>() {
      @Override
      public int partition(String key, int numPartitions) {
        // Math.floorMod is always non-negative for a positive divisor.
        // The previous Math.abs(hashCode) % n idiom is broken: for a key whose
        // hashCode() is Integer.MIN_VALUE, Math.abs returns Integer.MIN_VALUE
        // (still negative), yielding an invalid negative partition index.
        return Math.floorMod(key.hashCode(), numPartitions);
      }
    }, r -> r).print();
  }

  // Random partitioning (ShufflePartitioner): each record is sent to a randomly
  // chosen downstream parallel subtask.
  public static void shuffle(DataStreamSource<String> socketSource) {
    socketSource.shuffle().print();
  }

  // Round-robin partitioning (RebalancePartitioner): records are distributed to
  // downstream parallel subtasks in round-robin order.
  public static void rebalance(DataStreamSource<String> socketSource) {
    socketSource.rebalance().print();
  }

  // RescalePartitioner: an optimization of rebalance. Rescale round-robins only
  // among the downstream subtasks co-located on the same TaskManager, whereas
  // rebalance round-robins across all subtasks on all TaskManagers (cross-node).
  public static void rescale(DataStreamSource<String> socketSource) {
    socketSource.rescale().print();
  }

  /**
   * Sample output (parallelism 3 — every record appears once per subtask):
   * <pre>
   * 1> 1
   * 3> 1
   * 2> 1
   * 1> 2
   * 3> 2
   * 2> 2
   * 1> 1
   * 2> 1
   * 3> 1
   * 3> 4
   * 2> 4
   * 1> 4
   * </pre>
   */
  // BroadcastPartitioner: every upstream record is replicated to ALL downstream
  // parallel subtasks.
  public static void broadcast(DataStreamSource<String> socketSource) {
    socketSource.broadcast().print();
  }

  /**
   * global() sends every record to the FIRST parallel subtask of the downstream
   * operator, effectively forcing the downstream parallelism to 1. Use with
   * extreme care — it can put heavy pressure on that single subtask.
   */
  public static void global(DataStreamSource<String> socketSource) {
    socketSource.global().print();
  }
}
