package cn.edu.flink.tutorial.partition;

import cn.edu.flink.tutorial.source.UDSourceFunction;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demo job that fans the same source stream out through every physical
 * partitioning strategy Flink offers (global, shuffle, rebalance, rescale,
 * broadcast, forward, hash/keyBy, custom), each feeding its own print sink,
 * so the redistribution patterns can be compared in the local web UI.
 */
public class TestPartitiner {
    public static void main(String[] args) throws Exception {
        // Create a local execution environment with the web UI enabled.
        Configuration conf = new Configuration();
        conf.setString(RestOptions.BIND_PORT, "8081"); // web UI port
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);
        env.setParallelism(1);

        SingleOutputStreamOperator<Tuple2<Long, Long>> streamSource = env
                .addSource(new UDSourceFunction())
                .map(x -> Tuple2.of(x, 1L))
                // Lambda erases tuple generics, so the type info must be declared explicitly.
                .returns(Types.TUPLE(Types.LONG, Types.LONG));

        // Parallelism differs (1 -> 10), so Flink inserts its default
        // repartitioning (rebalance/rescale) automatically.
        streamSource.print("print").setParallelism(10).name("print");


        // 1. GLOBAL: every record goes to the first downstream subtask.
        streamSource.global().print("global").setParallelism(10).name("global");
        // 2. SHUFFLE: records are distributed to downstream subtasks at random.
        streamSource.shuffle().print("shuffle").setParallelism(10).name("shuffle");
        // 3. REBALANCE: round-robin across all downstream subtasks.
        streamSource.rebalance().print("rebalance").setParallelism(10).name("rebalance");
        // 4. RESCALE: round-robin, but only within local subtask groups.
        streamSource.rescale().print("rescale").setParallelism(10).name("rescale");
        // 5. BROADCAST: every record is replicated to all downstream subtasks.
        streamSource.broadcast().print("broadcast").setParallelism(10).name("broadcast");
        // 6. FORWARD: one-to-one forwarding; does not allow a change of parallelism,
        //    so no setParallelism(10) here.
        streamSource.forward().print("forward").name("forward");
        // 7. HASH: keyBy partitions by the key's hash. Uses an explicit KeySelector
        //    instead of the deprecated field-index overload keyBy(1).
        //    NOTE(review): keying by f1 (the constant 1L) hashes everything to a
        //    single partition — confirm f0 wasn't intended.
        streamSource.keyBy(
                new KeySelector<Tuple2<Long, Long>, Long>() {
                    @Override
                    public Long getKey(Tuple2<Long, Long> value) throws Exception {
                        return value.f1;
                    }
                }).print("hash").setParallelism(10).name("hash");
        // 8. CUSTOM: user-defined partitioner; this demo routes every record
        //    to partition 0 regardless of the key.
        streamSource.partitionCustom(
                new Partitioner<Long>() {
                    @Override
                    public int partition(Long key, int numPartitions) {
                        return 0;
                    }
                },
                new KeySelector<Tuple2<Long, Long>, Long>() {
                    @Override
                    public Long getKey(Tuple2<Long, Long> value) throws Exception {
                        return value.f1;
                    }
                }).print().setParallelism(10).name("custom");


        env.execute("TestPartitiner");
    }
}
