package streaming.demo.mq.kafka.config;

import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;

import java.nio.charset.StandardCharsets;

/**
 * A key-hash partitioner: routes each record to a partition chosen by hashing
 * the serialized record key modulo the number of available partitions, so all
 * records with the same key land on the same partition.
 */
public class MyPartitioner extends FlinkKafkaPartitioner<Object> {

    /**
     * Chooses the target Kafka partition for a record.
     *
     * @param record      the record being written (unused; routing is key-based only)
     * @param key         serialized key produced by the KeyedSerializationSchema (may be null)
     * @param value       serialized value produced by the KeyedSerializationSchema (unused)
     * @param targetTopic the topic the record is written to
     * @param partitions  the partition ids available for the target topic, e.g. [0, 1, 2, 3, 4]
     * @return one of the ids from {@code partitions}
     * @throws IllegalArgumentException if {@code partitions} is null or empty
     */
    @Override
    public int partition(Object record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
        if (partitions == null || partitions.length == 0) {
            throw new IllegalArgumentException(
                    "partitions must be non-empty for topic " + targetTopic);
        }
        // Kafka keys are nullable; a keyless record cannot be hashed deterministically,
        // so fall back to the first available partition.
        if (key == null) {
            return partitions[0];
        }
        // Decode with an explicit charset so the resulting hash — and therefore the
        // partition assignment — is stable across JVMs with different platform defaults.
        String keyString = new String(key, StandardCharsets.UTF_8);
        // hashCode() % length lies in (-length, length); Math.abs maps it into [0, length).
        int index = Math.abs(keyString.hashCode() % partitions.length);
        // Return the partition id at the computed index. The ids in `partitions` are not
        // guaranteed to be contiguous or zero-based, so returning the raw index itself
        // could address a partition that does not exist.
        return partitions[index];
    }
}
