package com.derbysoft.nuke.kafka.manager.infrastructure.kafka;

import com.derbysoft.nuke.kafka.manager.infrastructure.kafka.model.ConsumerGroupSummary;
import com.derbysoft.nuke.kafka.manager.infrastructure.kafka.model.Message;
import com.derbysoft.nuke.kafka.manager.infrastructure.kafka.model.OffsetTimestamp;
import com.derbysoft.nuke.kafka.manager.infrastructure.kafka.model.PartitionOffset;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.List;
import java.util.Map;
import java.util.Optional;

/**
 * Administrative facade over a Kafka cluster: message sampling, topic/partition
 * offset inspection, broker discovery, and consumer-group introspection.
 *
 * <p>Extends {@link AutoCloseable} so implementations can be used in
 * try-with-resources; {@link #close()} releases any underlying client connections.
 * Thread-safety is implementation-defined.
 */
public interface KafkaAdminClient extends AutoCloseable {

    /**
     * Fetches up to {@code limit} messages from a single partition, starting at
     * {@code startOffset} (inclusive).
     *
     * @param topicPartition the topic partition to read from
     * @param startOffset    the first offset to fetch (inclusive)
     * @param limit          maximum number of messages to return
     * @return the fetched messages, with string-decoded keys and values
     */
    List<Message<String, String>> fetchMessages(TopicPartition topicPartition, long startOffset, int limit);

    /**
     * Fetches messages from several partitions at once.
     *
     * @param offsets           starting offset (inclusive) for each partition to read
     * @param limitPerPartition maximum number of messages to return per partition
     * @return the fetched messages, with string-decoded keys and values
     */
    List<Message<String, String>> fetchMessages(Map<TopicPartition, Long> offsets, int limitPerPartition);

    /**
     * Lists all topics visible to this client.
     *
     * @return a map from topic name to that topic's partition metadata
     */
    Map<String, List<PartitionInfo>> listTopics();

    /**
     * Returns the partition metadata for a single topic.
     *
     * @param topic the topic name
     * @return the topic's partitions
     */
    List<PartitionInfo> getPartitions(String topic);

    /**
     * Returns the current offset bounds of one partition.
     *
     * @param topicPartition the partition to inspect
     * @return the partition's offset information
     */
    PartitionOffset getTopicPartitionOffset(TopicPartition topicPartition);

    /**
     * Returns the current offset bounds for every partition of a topic.
     *
     * @param topic the topic name
     * @return a map from partition id to its offset information
     */
    Map<Integer, PartitionOffset> getTopicOffsets(String topic);

    /**
     * Returns the current offset bounds for every partition of every visible topic.
     *
     * @return a map from partition to its offset information
     */
    Map<TopicPartition, PartitionOffset> getAllTopicPartitionOffsets();

    /**
     * Returns the timestamp associated with the start (earliest) offset of each
     * partition of a topic.
     *
     * @param topic the topic name
     * @return a map from partition id to the earliest offset/timestamp pair
     */
    Map<Integer, OffsetTimestamp> getStartOffsetTimestamps(String topic);

    /**
     * Returns the timestamp associated with the end (latest) offset of each
     * partition of a topic.
     *
     * @param topic the topic name
     * @return a map from partition id to the latest offset/timestamp pair
     */
    Map<Integer, OffsetTimestamp> getEndOffsetTimestamps(String topic);

    /**
     * Searches each partition of a topic for the offset range falling between the
     * two timestamps.
     *
     * @param topic     the topic name
     * @param startTime range start, epoch milliseconds (presumably inclusive — see implementation)
     * @param endTime   range end, epoch milliseconds
     * @return a map from partition id to the offsets bounding the time window
     */
    Map<Integer, PartitionOffset> searchTopicOffsetsBetween(String topic, Long startTime, Long endTime);

    /**
     * Returns the offset and timestamp of the first message at or after the given
     * time in the partition.
     *
     * @param topicPartition the partition to search
     * @param time           epoch milliseconds to search from
     * @return the first matching offset/timestamp, or empty if no message exists
     *         at or after {@code time}
     */
    Optional<OffsetTimestamp> getTopicPartitionOffset(TopicPartition topicPartition, Long time);

    /**
     * Returns the bootstrap broker nodes this client was configured with.
     *
     * @return the bootstrap brokers
     */
    List<Node> bootstrapBrokers();

    /**
     * Queries every broker for its supported API versions.
     *
     * @return a map from broker node to its advertised API version information
     */
    Map<Node, NodeApiVersions> listAllBrokerVersionInfo();

    /**
     * Lists consumer groups, grouped by the broker coordinating them.
     *
     * @return a map from broker node to the group ids it coordinates
     */
    Map<Node, List<String>> listAllConsumerGroups();

    /**
     * Describes a consumer group: its state, members, and assignments.
     *
     * @param groupId the consumer group id
     * @return a summary of the group
     */
    ConsumerGroupSummary describeConsumerGroup(String groupId);

    /**
     * Returns the committed offsets of a consumer group.
     *
     * @param groupId the consumer group id
     * @return a map from partition to the group's committed offset
     */
    Map<TopicPartition, Long> listGroupOffsets(String groupId);

    /**
     * Releases any resources held by this client. Overrides
     * {@link AutoCloseable#close()} without a checked exception, so callers need
     * no try/catch in try-with-resources.
     */
    @Override
    void close();
}
