package cc.jinglupeng.kom.data;

import cc.jinglupeng.kom.bean.ConsumerTopicPartitionState;
import cc.jinglupeng.kom.bean.ZkPartitionState;
import cc.jinglupeng.kom.pool.KafkaConsumerPool;
import cc.jinglupeng.kom.pool.ZookeeperPool;
import cc.jinglupeng.kom.util.ZkUtils;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.Assert;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Created by jinglupeng on 4/10/16.
 */
public class KafkaData {

    private static final Logger logger = LoggerFactory.getLogger(KafkaData.class);
    /** Client id reported to the brokers on every OffsetRequest. */
    public final static String CLIENT_ID = "KOM";

    /**
     * Returns the per-partition state of {@code topic} as seen by consumer group
     * {@code consumer}: the broker-side latest offset, the group's committed offset
     * and owner (both read from ZooKeeper), and the time of the last offset commit.
     *
     * @param consumer consumer group name; must not be {@code null}
     * @param topic    topic name; must not be {@code null}
     * @return one {@link ConsumerTopicPartitionState} per partition of the topic
     * @throws Exception if metadata/offset fetching fails, a partition has no
     *                   leader, or the pooled resources cannot be obtained
     */
    public static ConsumerTopicPartitionState[] getConsumerTopicPartitionStates(String consumer, String topic) throws Exception {
        Assert.notNull(consumer, "consumer cannot be null.");
        Assert.notNull(topic, "topic cannot be null.");

        ZooKeeper zooKeeper = ZookeeperPool.borrowZooKeeper();
        SimpleConsumer defaultSimpleConsumer = KafkaConsumerPool.borrowDefaultSimpleConsumer();
        try {
            // Ask the default broker for topic metadata to enumerate all partitions.
            TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(Arrays.asList(topic));
            TopicMetadataResponse topicMetadataResponse = defaultSimpleConsumer.send(topicMetadataRequest);
            TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
            List<PartitionMetadata> partitionMetadatas = topicMetadata.partitionsMetadata();
            ConsumerTopicPartitionState[] consumerTopicPartitionStates = new ConsumerTopicPartitionState[partitionMetadatas.size()];

            int index = 0;
            for (PartitionMetadata partitionMetadata : partitionMetadatas) {
                int partition = partitionMetadata.partitionId();
                Broker broker = partitionMetadata.leader();
                // A partition can transiently have no leader (e.g. during leader
                // re-election); fail fast with a clear message instead of an NPE.
                if (broker == null) {
                    throw new Exception(String.format("Partition [%s, %d] has no leader.", topic, partition));
                }
                int brokerId = broker.id();

                ConsumerTopicPartitionState consumerTopicPartitionState = new ConsumerTopicPartitionState();
                consumerTopicPartitionState.setConsumer(consumer);
                consumerTopicPartitionState.setTopic(topic);
                consumerTopicPartitionState.setPartition(partition);

                // The latest offset must be requested from the partition leader.
                SimpleConsumer simpleConsumer = KafkaConsumerPool.borrowSimpleConsumer(brokerId);
                try {
                    consumerTopicPartitionState.setLastOffset(fetchLastOffset(simpleConsumer, topic, partition));
                } finally {
                    KafkaConsumerPool.returnSimpleConsumer(brokerId, simpleConsumer);
                }

                // High-level-consumer state lives in ZooKeeper under /consumers/<group>/...
                String offsetPath = String.format("/consumers/%s/offsets/%s/%s", consumer, topic, partition);
                String ownerPath = String.format("/consumers/%s/owners/%s/%s", consumer, topic, partition);
                String offset = ZkUtils.getData(zooKeeper, offsetPath);
                String owner = ZkUtils.getData(zooKeeper, ownerPath);
                // -1 marks "no offset committed yet" for this partition.
                consumerTopicPartitionState.setOffset(offset == null ? -1L : Long.parseLong(offset));
                consumerTopicPartitionState.setOwner(owner);

                // The offset znode's mtime is the moment of the last offset commit.
                // NOTE(review): unlike getData above, getStat is called without the
                // zooKeeper handle — verify this ZkUtils overload is intentional.
                Stat stat = ZkUtils.getStat(offsetPath);
                long lastCommitTime = stat == null ? -1 : stat.getMtime();
                consumerTopicPartitionState.setLastCommitTime(lastCommitTime);
                consumerTopicPartitionStates[index++] = consumerTopicPartitionState;
            }
            return consumerTopicPartitionStates;
        } finally {
            ZookeeperPool.returnZooKeeper(zooKeeper);
            KafkaConsumerPool.returnDefaultSimpleConsumer(defaultSimpleConsumer);
        }
    }

    /**
     * Returns the latest (log-end) offset of the given topic partition, queried
     * from the partition's current leader.
     *
     * @param topic     topic name; must not be {@code null}
     * @param partition partition id
     * @return the latest offset of the partition
     * @throws Exception if the leader lookup or the offset request fails
     */
    public static long getLastOffset(String topic, int partition) throws Exception {
        Assert.notNull(topic, "topic cannot be null.");

        // Resolve the partition leader from ZooKeeper, then ask that broker.
        ZkPartitionState partitionState = ZkData.getPartitionState(topic, partition);
        int leader = partitionState.getLeader();
        SimpleConsumer consumer = KafkaConsumerPool.borrowSimpleConsumer(leader);
        try {
            return fetchLastOffset(consumer, topic, partition);
        } finally {
            KafkaConsumerPool.returnSimpleConsumer(leader, consumer);
        }
    }

    /**
     * Issues an OffsetRequest for {@code LatestTime} against the given consumer
     * (which must be connected to the partition leader) and returns the single
     * latest offset. Shared by both public methods to avoid duplicated logic.
     *
     * @throws Exception if the broker reports an error or returns no offsets
     */
    private static long fetchLastOffset(SimpleConsumer consumer, String topic, int partition) throws Exception {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
        // LatestTime with maxNumOffsets=1 yields exactly the log-end offset.
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            throw new Exception("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition));
        }
        long[] offsets = response.offsets(topic, partition);
        // Guard against an empty result before indexing.
        if (offsets.length == 0) {
            throw new Exception(String.format("No offset returned for [%s, %d].", topic, partition));
        }
        return offsets[0];
    }

}
