package com.atguigu.flink.chapter05.transform.partition;

import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @Author lzc
 * @Date 2023/6/19 08:52
 */
/**
 * Demonstrates Flink's physical partitioning strategies on a DataStream.
 *
 * <p>The active path uses {@code partitionCustom} with a project-local
 * {@link MyPartitioner}; the commented-out lines show the built-in
 * alternatives (shuffle / rebalance / rescale / broadcast / global / keyBy)
 * and are kept deliberately for classroom experimentation. See the trailing
 * block comment in this file for how {@code keyBy} maps keys to subtasks.
 */
public class PartitionDemo {
    /**
     * Builds and runs the demo job.
     *
     * @param args unused command-line arguments
     * @throws Exception if job graph construction or execution fails;
     *         propagated instead of being swallowed so a broken demo
     *         fails loudly rather than printing a stack trace and exiting 0
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Pin the local web UI to a fixed port so it is easy to find while debugging.
        conf.setInteger("rest.port", 2000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(2);

        env
            .fromElements(10, 21, 31, 41, 20, 20)
            // Alternative source / partitioning strategies, kept for experimentation:
            //            .socketTextStream("hadoop162", 8888)
            //            .keyBy(x -> x % 2)
            //.shuffle()
            //            .rebalance()  // round-robin, evenly across all subtasks
            //            .rescale()
            // .broadcast()
            //.global()
            .partitionCustom(new MyPartitioner(), new KeySelector<Integer, String>() {
                @Override
                public String getKey(Integer value) throws Exception {
                    // MyPartitioner partitions by a String key, so convert here.
                    return String.valueOf(value);
                }
            })
            .print().setParallelism(2);

        env.execute();
    }
}
/*
keyBy:
    按照 key 进行分区(分组)
    相同 key的元素, 肯定在同一个分区
    一个分区可以有多个组
    // key, 128, 2
    KeyGroupRangeAssignment.assignKeyToParallelOperator(
                key, maxParallelism, numberOfChannels);
                // 128, 2, [0,127]
                computeOperatorIndexForKeyGroup(
                maxParallelism, parallelism, assignToKeyGroup(key, maxParallelism));
                    // key, 128
                    assignToKeyGroup(key, maxParallelism)
                        // key 的hash 值, 128
                        computeKeyGroupForKeyHash(key.hashCode(), maxParallelism);
                            MathUtils.murmurHash(keyHash) % maxParallelism;
      [0,127] * 2 / 128
     keyGroupId * parallelism / maxParallelism;
     
     1. 对 key 进行双重 hash : hashCode->murmur hash
     
     2. keyGroupId * parallelism / maxParallelism 计算并行度
     
shuffle:
    随机分区
rebalance:
    平均分布
    
    跨 TaskManager

rescale:
    平均分布
    不跨 TaskManager
    
broadcast:
    广播
    
    一个元素会被广播到所有的分区
    
global:
    所有的元素都进入 0 并行度
    
 


 */