package com.peng.consumer;

import kafka.api.FetchRequestBuilder;
import kafka.cluster.BrokerEndPoint;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/*
 * Fetches messages for a given topic, partition and starting offset
 * using Kafka's legacy low-level (SimpleConsumer) API.
 * */
public class LowerConsumer {

    public static void main(String[] args) {
        // Broker hosts to probe for topic metadata (any live broker will do).
        List<String> brokers = new ArrayList<String>();
        brokers.add("localhost");
        // Broker port.
        int port = 9092;
        // Topic to consume from.
        String topic = "first";
        // Partition to consume from.
        int partition = 0;
        // Starting offset within the partition.
        long offset = 2;

        LowerConsumer lowerConsumer = new LowerConsumer();
        lowerConsumer.getData(brokers, port, topic, partition, offset);
    }

    /**
     * Finds the broker that currently leads the given topic partition.
     * <p>
     * Probes each candidate broker in turn, asks it for the topic's metadata,
     * and scans the partition metadata for a matching partition id.
     *
     * @param brokers   candidate broker hosts to query for metadata
     * @param port      broker port (same for all candidates)
     * @param topic     topic whose partition leader is wanted
     * @param partition partition id to look up
     * @return the leader's endpoint, or {@code null} if no broker reported it
     */
    private BrokerEndPoint findLeader(List<String> brokers, int port, String topic, int partition) {
        for (String broker : brokers) {
            // Temporary consumer used only to issue the metadata request.
            SimpleConsumer getLeader = new SimpleConsumer(broker, port, 1000, 1024 * 4, "getLeader");
            try {
                // Ask this broker for metadata about the topic.
                TopicMetadataRequest topicMetadataRequest =
                        new TopicMetadataRequest(Collections.singletonList(topic));
                TopicMetadataResponse metadataResponse = getLeader.send(topicMetadataRequest);

                // Scan every partition of every returned topic for our partition id.
                for (TopicMetadata topicMetadata : metadataResponse.topicsMetadata()) {
                    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                        if (partition == partitionMetadata.partitionId()) {
                            return partitionMetadata.leader();
                        }
                    }
                }
            } finally {
                // Fix: the consumer socket was previously leaked (never closed),
                // including on the early return above.
                getLeader.close();
            }
        }
        return null;
    }

    /**
     * Fetches messages for the given topic/partition starting at {@code offset}
     * from the partition leader and prints each message's offset and payload.
     *
     * @param brokers   candidate broker hosts used to discover the leader
     * @param port      broker port
     * @param topic     topic to read from
     * @param partition partition to read from
     * @param offset    first offset to fetch
     */
    private void getData(List<String> brokers, int port, String topic, int partition, long offset) {
        // Locate the machine hosting the partition leader; bail out if unknown.
        BrokerEndPoint leader = findLeader(brokers, port, topic, partition);
        if (leader == null) {
            return;
        }
        String leaderHost = leader.host();

        // Consumer connected to the leader for the actual fetch.
        SimpleConsumer getData = new SimpleConsumer(leaderHost, port, 1000, 1024 * 4, "getData");
        try {
            // Build the fetch request; 5000 is a byte budget, not a message count.
            // Additional addFetch calls could request more topic/partitions at once.
            kafka.api.FetchRequest fetchRequest = new FetchRequestBuilder()
                    .addFetch(topic, partition, offset, 5000)
                    .build();
            // Execute the fetch and pull out the message set for our partition.
            FetchResponse fetchResponse = getData.fetch(fetchRequest);
            ByteBufferMessageSet messageAndOffsetsets = fetchResponse.messageSet(topic, partition);

            // Print each message as "<offset>--<payload>".
            for (MessageAndOffset messageAndOffset : messageAndOffsetsets) {
                long offset1 = messageAndOffset.offset();
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                // Fix: decode explicitly as UTF-8 instead of the platform charset.
                System.out.println(offset1 + "--" + new String(bytes, StandardCharsets.UTF_8));
            }
        } finally {
            // Fix: the fetch consumer was previously leaked (never closed).
            getData.close();
        }
    }
}
