package com.atguigu.flink.day04;

import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @author Felix
 * @date 2024/8/13
 * 该案例演示了分区操作(算子)
 */
/**
 * Demonstrates the physical partitioning operators of the DataStream API.
 * Exactly one strategy is active at a time; the rest are kept commented out
 * so each can be tried by toggling the corresponding line.
 */
public class Flink02_Par {
    public static void main(String[] args) throws Exception {
        //TODO 1. Set up the stream execution environment (REST UI on port 8088)
        Configuration flinkConf = new Configuration();
        flinkConf.set(RestOptions.PORT,8088);
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment(flinkConf);
        environment.setParallelism(4);
        //environment.disableOperatorChaining();
        //TODO 2. Read text lines from the given socket host/port
        DataStreamSource<String> socketStream = environment.socketTextStream("hadoop102", 8888);
        //TODO 3. Partitioning operators — print the redistributed stream
        //3.1 shuffle: random redistribution
        //socketStream.shuffle().print();
        //3.2 rebalance: round-robin across all downstream subtasks
        //socketStream.map(a->a).setParallelism(2).rebalance().print();
        //3.3 rescale: round-robin within local subtask groups
        socketStream.map(a->a).setParallelism(2).rescale().print();
        //3.4 broadcast: every record to every downstream subtask
        //socketStream.broadcast().print();
        //3.5 global: all records to the first downstream subtask
        //socketStream.global().print();
        //3.6 keyBy: hash-based partitioning on the key
        //socketStream.keyBy(a->a).print();
        //3.7 forward: one-to-one pass-through
        //socketStream.print();
        //3.8 custom partitioner
        //socketStream.partitionCustom(
        //        new MyPar(),
        //        a->a
        //).print();

        //TODO 4. Submit the job
        environment.execute();
    }
}

/**
 * Custom partitioner that routes each record by the hash of its key.
 *
 * <p>Uses {@link Math#floorMod(int, int)} instead of the {@code %} operator:
 * {@code String.hashCode()} can be negative, and in Java {@code a % b} keeps
 * the sign of {@code a}, so {@code key.hashCode() % numPartitions} could
 * return a negative partition index and fail the job at runtime.
 * {@code floorMod} always yields a value in {@code [0, numPartitions)}.
 */
class MyPar implements Partitioner<String>{
    @Override
    public int partition(String key, int numPartitions) {
        return Math.floorMod(key.hashCode(), numPartitions);
    }
}