package edu.nepu.flink.api.partition;

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @Date 2024/2/28 21:57
 * @Created by chenshuaijun
 */
public class CustomerDefinePartition {

    public static void main(String[] args) throws Exception {

        // Obtain the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Use two parallel subtasks so the effect of the custom partitioning is observable.
        env.setParallelism(2);

        // Read lines from a socket source; each line is expected to be an integer string.
        DataStreamSource<String> streamSource = env.socketTextStream("hadoop102", 9999);

        // Route each record to the partition chosen by MyPartition; the key selector
        // uses the record itself as the partitioning key.
        streamSource.partitionCustom(new MyPartition(), key -> key).print();

        env.execute();
    }

    /**
     * Custom partitioner that maps a numeric string key to a partition index.
     *
     * <p>Note: a non-numeric key will throw {@link NumberFormatException} at runtime
     * and fail the job — acceptable for this demo, where input is assumed numeric.
     */
    static class MyPartition implements Partitioner<String> {

        /**
         * Computes the target partition for a key.
         *
         * @param key           the partitioning key (expected to be a numeric string)
         * @param numPartitions the number of downstream partitions
         * @return the partition index, guaranteed to be in [0, numPartitions)
         */
        @Override
        public int partition(String key, int numPartitions) {
            // Math.floorMod keeps the result non-negative even for negative keys;
            // the original plain % would return a negative (invalid) partition
            // index whenever the parsed key was negative.
            return Math.floorMod(Integer.parseInt(key), numPartitions);
        }
    }
}
