package com.fwmagic.flink.partition;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demonstrates Flink's built-in physical partitioning strategies
 * (shuffle, rebalance, rescale, broadcast) and a user-defined partitioner.
 */
public class PhysicalPartitionDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Alternative plain-string source kept for reference:
        //DataStreamSource<String> source = env.fromElements("java", "flink", "scala", "spark", "hadoop", "flink-sql", "flink-table");

        DataStream<Tuple2<String, Integer>> source = env.fromElements(
                Tuple2.of("java", 3),
                Tuple2.of("flink", 4),
                Tuple2.of("flink-table", 2),
                Tuple2.of("flink-sql", 5),
                Tuple2.of("hadoop", 5),
                Tuple2.of("flink-test", 15));

        // Random partitioning: records are redistributed uniformly at random.
        //source.shuffle().print();

        // Most common: round-robin redistribution across all parallel subtasks.
        //source.rebalance().print();

        // Also round-robin, but operator-local: cycles only within each
        // upstream subtask's subset of downstream subtasks.
        //source.rescale().print();

        // Broadcast: replicate every record to all downstream subtasks —
        // useful when joining a small data set against a large one.
        //source.broadcast().print();

        // Custom partitioning: records whose key starts with "flink" land
        // in the same partition. Sample output:
        //   3> (java,3)
        //   1> (flink,4)
        //   3> (hadoop,5)
        //   1> (flink-table,2)
        //   1> (flink-sql,5)
        //   1> (flink-test,15)
        source.partitionCustom(new MyCustomPartition(), 0).print();

        env.execute();
    }
}
