package com.atguigu.day04;

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @author Felix
 * @date 2024/4/2
 * Demonstrates Flink's physical-partitioning operators.
 *
 * The active pipeline below uses {@code rescale()}; other strategies that can be
 * swapped in for comparison are: shuffle (random), rebalance (round-robin across
 * all downstream tasks), broadcast (copy to every task), global (everything to
 * subtask 0), keyBy (hash by key), forward (same-subtask, requires equal
 * parallelism), and partitionCustom with a user-defined {@link MyPartitioner}.
 */
public class Flink02_part {
    public static void main(String[] args) throws Exception {
        // Local environment with the web UI enabled so redistribution can be observed.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        env.setParallelism(4);
        // Keep each operator in its own task so the partitioning edges are visible in the UI.
        env.disableOperatorChaining();

        DataStreamSource<String> source = env.socketTextStream("hadoop102", 8888);

        // Identity map at parallelism 2, then rescale: each upstream subtask
        // round-robins only over its local group of downstream subtasks.
        source.map(value -> value)
                .setParallelism(2)
                .rescale()
                .print();

        env.execute();
    }
}

// Custom partitioner: routes each record to a channel derived from its key's hash.
class MyPartitioner implements Partitioner<String>{
    /**
     * Maps a key to a downstream channel index.
     *
     * @param key           the record's key (non-null, supplied by the key selector)
     * @param numPartitions number of downstream channels; result must be in [0, numPartitions)
     * @return the target channel index
     */
    @Override
    public int partition(String key, int numPartitions) {
        // Math.floorMod, not %: hashCode() can be negative, and Java's % keeps the
        // dividend's sign, which would yield an invalid (negative) channel index.
        // floorMod is also safe for Integer.MIN_VALUE, where Math.abs would fail.
        return Math.floorMod(key.hashCode(), numPartitions);
    }
}

