package com.atguigu.day04;

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @author Felix
 * @date 2024/7/12
 * Demonstrates Flink's stream partitioning (data-redistribution) strategies.
 * Exactly one strategy is active at a time; the rest are kept as
 * commented-out alternatives to try individually.
 */
public class Flink02_Par {
    public static void main(String[] args) throws Exception {
        //TODO 1. Set up the stream execution environment (local REST UI on port 8088)
        Configuration config = new Configuration();
        config.set(RestOptions.PORT, 8088);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);
        env.setParallelism(21);
        //env.disableOperatorChaining();

        //TODO 2. Read text lines from the given network socket
        DataStreamSource<String> lines = env.socketTextStream("hadoop102", 8888);

        //TODO 3. shuffle — random redistribution
        //lines.shuffle().print();

        //TODO 4. rebalance — round-robin across all downstream subtasks
        //lines.map(v -> v).setParallelism(2).rebalance().print();

        //TODO 5. rescale — round-robin within local subtask groups
        lines.map(v -> v).setParallelism(5).rescale().print();

        //TODO 6. broadcast — every element to every downstream subtask
        //lines.broadcast().print();

        //TODO 7. global — all elements to the first downstream subtask
        //lines.global().print();

        //TODO 8. forward — one-to-one, keeps subtask pairing
        //lines.print();

        //TODO 9. keyBy — hash of the key decides the subtask
        //lines.keyBy(a -> a).print();

        //TODO 10. custom partitioner
        //lines.partitionCustom(new MyPartitioner(), a -> a).print();

        env.execute();
    }
}

/**
 * Custom partitioner that routes each element by the hash of its key.
 *
 * <p>Note: {@code String.hashCode()} may be negative, so a plain {@code %}
 * could produce a negative partition index, which Flink rejects at runtime.
 * {@link Math#floorMod(int, int)} guarantees a result in
 * {@code [0, numPartitions)}.
 */
class MyPartitioner implements Partitioner<String> {

    /**
     * @param key           the element's key (the element itself in the demo above)
     * @param numPartitions total number of downstream partitions
     * @return a partition index in the range {@code [0, numPartitions)}
     */
    @Override
    public int partition(String key, int numPartitions) {
        // floorMod avoids the negative-result pitfall of '%' on negative hash codes
        return Math.floorMod(key.hashCode(), numPartitions);
    }
}
