package com.intct.flink;

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @author gufg
 * @since 2025-06-26 09:56
 */
/**
 * Demonstrates Flink stream partitioning strategies (shuffle, rebalance, broadcast,
 * global, rescale) and a custom {@link Partitioner}. Reads whitespace-separated words
 * from a socket, maps each word to a (word, 1) tuple, then routes tuples to downstream
 * subtasks via a hash-based custom partitioner.
 */
public class PartitionDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Pin the local web UI / REST endpoint to port 8081.
        conf.set(RestOptions.BIND_PORT, "8081");
        // Create the execution environment with the explicit configuration.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(3);
        // Source: one line of text per socket message from host "jd-node", port 8888.
        DataStreamSource<String> socketTextStream = env.socketTextStream("jd-node", 8888);

        // Transformation.
        /*
         * A RichFunction adds lifecycle methods beyond a plain Function:
         *   open()  : called once per parallel subtask when the job starts
         *   close() : called once per parallel subtask when the job ends
         *             close() is only invoked on a normal shutdown;
         *             it is NOT invoked if the job exits abnormally.
         */
        SingleOutputStreamOperator<Tuple2<String, Integer>> flatMapDS =
                socketTextStream.flatMap(new RichFlatMapFunction<String, Tuple2<String, Integer>>() {

            @Override
            public void open(Configuration parameters) throws Exception {
                System.out.println("调用open方法!");
            }

            @Override
            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                // Split each input line on single spaces and emit (word, 1) per token.
                String[] splits = value.split(" ");
                for (String split : splits) {
                    out.collect(Tuple2.of(split, 1));
                }
            }

            @Override
            public void close() throws Exception {
                System.out.println("调用close方法!");
            }
        });

        // shuffle: fully random redistribution; preserves no ordering information.
//        flatMapDS.shuffle();

        // rebalance: round-robin redistribution for load balancing. Each parallel
        // instance receives roughly the same amount of data, but elements with the
        // same key are NOT guaranteed to land on the same instance.
//        flatMapDS.rebalance();

        // broadcast: replicates every element to all downstream parallel instances.
        // Each downstream task receives a full copy of the upstream data set; commonly
        // used to distribute configuration parameters or small lookup tables.
//        flatMapDS.broadcast().print();

        // global: sends all elements to the first (index 0) downstream subtask.
//        flatMapDS.global().print();

        // rescale: round-robin within a local subset of downstream subtasks.
//        flatMapDS.rescale().print();

        // Custom partitioning strategy: route each tuple by the hash of its word.
        DataStream<Tuple2<String, Integer>> partitionCustomDS = flatMapDS.partitionCustom(new Partitioner<String>() {

            @Override
            public int partition(String key, int numPartitions) {
                // Math.floorMod guarantees a result in [0, numPartitions) even when
                // hashCode() is negative; plain '%' would yield a negative channel
                // index and fail at runtime in Flink's channel selector.
                return Math.floorMod(key.hashCode(), numPartitions);
            }
        }, new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) throws Exception {
                // Partition by the word (first tuple field).
                return value.f0;
            }
        });

        partitionCustomDS.print();

        env.execute();
    }
}
