package com.pickup.kafka.config;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.springframework.beans.factory.annotation.Configurable;
import org.springframework.stereotype.Component;


// NOTE(review): @Configurable only works with AspectJ load-time weaving, and Kafka
// instantiates partitioners reflectively via the "partitioner.class" producer
// property anyway — confirm this annotation is actually doing anything here.
@Configurable
public class PartitionerConfig implements Partitioner {

    /**
     * Computes the target partition for a record.
     *
     * <p>Records with a non-null key are routed by {@code key.hashCode()} modulo the
     * partition count, so all records sharing a key land on the same partition
     * (preserving per-key ordering). Records with a {@code null} key are spread
     * randomly across partitions instead of throwing a {@link NullPointerException}.
     *
     * @param topic      topic the record is being sent to
     * @param key        record key, may be {@code null}
     * @param keyBytes   serialized key, may be {@code null}
     * @param value      record value (unused)
     * @param valueBytes serialized value (unused)
     * @param cluster    current cluster metadata, used to look up the partition count
     * @return a partition index in {@code [0, numPartitions)}
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();

        // Keyless records are legal in Kafka; spread them randomly rather than NPE-ing.
        if (key == null) {
            return ThreadLocalRandom.current().nextInt(numPartitions);
        }

        // floorMod, not Math.abs(hash) % n: Math.abs(Integer.MIN_VALUE) is still
        // negative and would have produced an invalid (negative) partition index.
        return Math.floorMod(key.hashCode(), numPartitions);
    }

    @Override
    public void close() {
        // No resources to release.
    }

    @Override
    public void configure(Map<String, ?> map) {
        // No custom configuration required.
    }
}
