package com.huwaiwai.kafka.demo02.consumer;

import com.google.common.collect.Maps;
import com.huwaiwai.kafka.KafkaProperties;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Simple (low-level) Kafka consumer demo.
 * Offsets are tracked by the caller; the target topic and partition are
 * assigned manually rather than via consumer-group rebalancing.
 * Created by Administrator on 2017/5/1.
 */
public class MySimpleConsumer {

//    public static void main(String[] args) {
//        new MySimpleConsumer().consume();
//    }

//    public void consume() {
//        int partition = 0;
//        Broker leaderBroker = findLeader(KafkaProperties.BROKER_LIST, KafkaProperties.TOPIC, partition);
//        SimpleConsumer simpleConsumer = new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), 20000, 10000, "mySimpleConsumer");
//        long startOffset = 1;
//        int fetchSize = 1000;
//
//        while (true) {
//            long offset = startOffset;
//            FetchRequest request = new FetchRequestBuilder().addFetch(KafkaProperties.TOPIC, 0, startOffset, fetchSize).build();
//            FetchResponse fetchResponse = simpleConsumer.fetch(request);
//
//            ByteBufferMessageSet messageSet = fetchResponse.messageSet(KafkaProperties.TOPIC, partition);
//            for (MessageAndOffset messageAndOffset : messageSet) {
//                Message mess = messageAndOffset.message();
//                ByteBuffer payload = mess.payload();
//                byte[] bytes = new byte[payload.limit()];
//                payload.get(bytes);
//                String msg = new String(bytes);
//                offset = messageAndOffset.offset();
//                System.out.println("partition : " + 3 + ", offset : " + offset + "  mess : " + msg);
//                startOffset = offset + 1;
//            }
//        }
//    }

//    public Broker findLeader(String brokerHosts, String topic, int partition) {
//        Broker leader = findPartitionMetadata(brokerHosts, topic, partition).leader();
//        System.out.println(String.format("Leader tor topic %s, partition %d is %s:%d", topic, partition, leader.host(),
//                leader.port()));
//        return leader;
//    }

    /**
     * Queries the seed brokers for the metadata of a single topic partition.
     *
     * NOTE(review): the original implementation split {@code brokerHosts} by ":"
     * and then ignored the loop variable, re-splitting the whole string on every
     * iteration — it queried the same broker repeatedly and broke on multi-broker
     * lists. This version assumes the conventional "host1:port1,host2:port2"
     * format — confirm against KafkaProperties.BROKER_LIST.
     *
     * @param brokerHosts comma-separated list of "host:port" seed brokers
     * @param topic       topic whose metadata is requested
     * @param partition   partition id to look for
     * @return metadata for the requested partition, or {@code null} if no broker knows it
     */
    private PartitionMetadata findPartitionMetadata(String brokerHosts, String topic, int partition) {
        for (String brokerHost : brokerHosts.split(",")) {
            String[] hostAndPort = brokerHost.split(":");
            SimpleConsumer consumer = new SimpleConsumer(hostAndPort[0], Integer.valueOf(hostAndPort[1]),
                    100000, 64 * 1024, "leaderLookup");
            try {
                List<String> topics = Collections.singletonList(topic);
                TopicMetadataRequest request = new TopicMetadataRequest(topics);
                TopicMetadataResponse response = consumer.send(request);
                for (TopicMetadata topicMetadata : response.topicsMetadata()) {
                    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                        if (partitionMetadata.partitionId() == partition) {
                            // First broker that knows the partition is authoritative enough;
                            // stop probing the remaining seeds.
                            return partitionMetadata;
                        }
                    }
                }
            } finally {
                // Always release the lookup connection, even if send() throws.
                consumer.close();
            }
        }
        return null;
    }

    /**
     * Asks the broker for a single offset of the given topic partition.
     *
     * @param consumer  connected SimpleConsumer (must point at the partition leader)
     * @param topic     topic name
     * @param partition partition id
     * @param clientId  client id reported to the broker
     * @param whichTime timestamp selector — typically
     *                  {@code kafka.api.OffsetRequest.EarliestTime()} or
     *                  {@code kafka.api.OffsetRequest.LatestTime()}
     * @return the first offset the broker returns for the partition
     * @throws IllegalStateException if the broker reports an error or returns no offsets
     */
    public long getLastOffset(SimpleConsumer consumer, String topic, int partition, String clientId, long whichTime) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
        // maxNumOffsets = 1: we only need the single offset closest to whichTime.
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        // Surface broker-side failures explicitly instead of indexing into an empty array.
        if (response.hasError()) {
            throw new IllegalStateException("Error fetching offset for " + topic + "-" + partition
                    + ", error code: " + response.errorCode(topic, partition));
        }
        long[] offsets = response.offsets(topic, partition);
        if (offsets.length == 0) {
            throw new IllegalStateException("Broker returned no offsets for " + topic + "-" + partition);
        }
        return offsets[0];
    }

}
