package com.gy.spark.sparkstreaming.offsetManager.getOffset;

import org.apache.kafka.common.TopicPartition;

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

/**
 * Queries the Kafka brokers for the latest produced offset of every partition
 * of a topic and prints one line per partition.
 *
 * <p>Kafka must be running before this program is started.
 *
 * <p>NOTE(review): the broker-query logic in {@link #getTopicOffsets(String, String)}
 * was commented-out legacy {@code SimpleConsumer} code (pre-0.9 API) and has been
 * removed; the method currently always returns an empty map, so {@code main} prints
 * nothing. Reimplement it with the modern consumer API (e.g.
 * {@code KafkaConsumer#endOffsets}) before relying on this class.
 *
 * @author root
 */
public class GetTopicOffsetFromKafkaBroker {

    public static void main(String[] args) {
        Map<TopicPartition, Long> topicOffsets =
                getTopicOffsets("node1:9092,node2:9092,node3:9092", "mytopic");
        // Print one line per partition: topic, partition id, latest offset.
        for (Entry<TopicPartition, Long> entry : topicOffsets.entrySet()) {
            // Renamed from "TopicPartition" — the original local shadowed the class name.
            TopicPartition topicPartition = entry.getKey();
            Long offset = entry.getValue();
            String topic = topicPartition.topic();
            int partition = topicPartition.partition();
            System.out.println("topic = " + topic + ",partition = " + partition + ",offset = " + offset);
        }
    }

    /**
     * Returns, for each partition of {@code topic}, the latest offset at which
     * producers have written, as reported by the given brokers.
     *
     * <p>NOTE(review): currently a stub — the original implementation used the
     * deprecated pre-0.9 {@code kafka.javaapi.consumer.SimpleConsumer} +
     * {@code OffsetRequest} API and was entirely commented out (now deleted).
     * Until reimplemented (e.g. via {@code KafkaConsumer#endOffsets}) this method
     * always returns an empty map.
     *
     * @param kafkaBrokerServers comma-separated {@code host:port} broker list
     * @param topic              topic whose partition end offsets are wanted
     * @return map of partition to latest produced offset; currently always empty
     */
    public static Map<TopicPartition, Long> getTopicOffsets(String kafkaBrokerServers, String topic) {
        Map<TopicPartition, Long> retVals = new HashMap<>();
        return retVals;
    }
}
