package com.aiwiown.snackmq.api.impl;

import com.aiwiown.snackmq.api.BatchMessageListener;
import com.aiwiown.snackmq.api.ConsumeResult;
import com.aiwiown.snackmq.api.Consumer;
import com.aiwiown.snackmq.api.MessageListener;
import com.aiwiown.snackmq.api.OrderedMessageListener;
import com.aiwiown.snackmq.api.Producer;
import com.aiwiown.snackmq.api.PullResult;
import com.aiwiown.snackmq.api.config.ConsumerConfig;
import com.aiwiown.snackmq.api.config.ProducerConfig;
import com.aiwiown.snackmq.api.exception.SnackMQClientException;
import com.aiwiown.snackmq.common.message.Message;
import com.aiwiown.snackmq.common.message.MessageStatus;
import com.aiwiown.snackmq.common.message.MessageType;
import com.aiwiown.snackmq.network.client.NettyClient;
import com.google.common.util.concurrent.RateLimiter;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;

/**
 * 【已重构】"推"模式消费者的默认实现。
 * 此版本通过组合 ConsumerDelegate 来实现共享逻辑，遵循“组合优于继承”原则。
 */
@Slf4j
public class DefaultPushConsumer implements Consumer {

    // Delegate encapsulating the configuration/network logic shared by consumer
    // implementations (composition over inheritance).
    private final ConsumerDelegate delegate;

    // --- Pull-loop ownership ---
    // Presence of a key means a continuous pull loop is already running for that partition;
    // the supervisor uses putIfAbsent on this map to guarantee at most one loop per partition.
    private final ConcurrentMap<TopicPartition, Object> pullingPartitions = new ConcurrentHashMap<>();
    private static final Object PULLING_PLACEHOLDER = new Object();

    // --- Push-mode specific components: lazily created dead-letter-queue producer ---
    private final Function<String, Producer> dlqProducerFactory;
    private volatile Producer dlqProducer; // initialized via double-checked locking in sendToDLQ
    private final Object dlqProducerLock = new Object();

    // --- Subscription and listener management ---
    private final Map<String, SubscriptionData> subscriptions = new ConcurrentHashMap<>();
    private final Map<TopicPartition, PartitionProcessor> orderedProcessors = new ConcurrentHashMap<>();

    // Offset commits still in flight; close() waits (bounded) for these before shutting down.
    private final Set<CompletableFuture<?>> pendingCommits = ConcurrentHashMap.newKeySet();
    // Tracks the latest in-flight commit per partition; pulls for a partition pause while present.
    private final ConcurrentMap<TopicPartition, CompletableFuture<Void>> inflightPartitionCommits = new ConcurrentHashMap<>();
    // Retry counters keyed by message id (for batches: the first message's id).
    private final Map<Long, AtomicInteger> retryCounts = new ConcurrentHashMap<>();

    // --- Scheduling and throughput control ---
    private final ExecutorService consumeExecutor;        // business threads; null when consumeThreadNums <= 0
    private final ScheduledExecutorService pullScheduler;
    private final ScheduledExecutorService metadataScheduler;
    private final ScheduledExecutorService dispatchScheduler;
    private final ScheduledExecutorService orderedRetryScheduler;
    private final ScheduledExecutorService retryScheduler;
    private final RateLimiter rateLimiter;                // null when rate limiting is disabled

    // --- Buffering and flow control: bounded per-partition buffers of pulled messages ---
    private final Map<TopicPartition, BlockingQueue<Message>> pulledMessageCache = new ConcurrentHashMap<>();

    private static final String DLQ_PREFIX = "DLQ_";
    private static final int MAX_COMMIT_REDIRECT_RETRIES = 5;

    /**
     * Immutable pairing of a listener (single, batch or ordered — hence {@code Object})
     * with its optional server-side filter expression (may be null).
     */
    @Getter
    @AllArgsConstructor
    private static class SubscriptionData {
        private final Object listener;
        private final String filterExpression;
    }

    /**
     * Creates a push consumer with the default DLQ producer factory
     * (a {@link DefaultProducer} with default settings).
     *
     * @param brokerAddress  bootstrap broker address
     * @param consumerConfig consumer configuration
     */
    public DefaultPushConsumer(String brokerAddress, ConsumerConfig consumerConfig) {
        this(brokerAddress, consumerConfig, addr -> new DefaultProducer(addr, ProducerConfig.builder().build()));
    }

    /**
     * Creates a push consumer with a custom DLQ producer factory.
     * Delegates to the package-private constructor so there is a single initialization
     * path and tests can inject a mock/spy delegate.
     */
    public DefaultPushConsumer(String brokerAddress, ConsumerConfig consumerConfig, Function<String, Producer> dlqProducerFactory) {
        this(new ConsumerDelegate(brokerAddress, consumerConfig), dlqProducerFactory);
    }

    /**
     * Package-private constructor taking a pre-built {@link ConsumerDelegate}, which lets unit
     * tests inject a mock/spy delegate instead of a real network client. All schedulers and the
     * optional business thread pool / rate limiter are configured from the delegate's config.
     */
    DefaultPushConsumer(ConsumerDelegate delegate, Function<String, Producer> dlqProducerFactory) {
        this.delegate = delegate;
        this.dlqProducerFactory = dlqProducerFactory;

        ConsumerConfig config = delegate.getConsumerConfig();
        String group = config.getConsumerGroup();

        this.pullScheduler = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "snackmq-push-consumer-pull-" + group));
        this.metadataScheduler = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "snackmq-push-consumer-metadata-" + group));
        this.dispatchScheduler = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "snackmq-push-consumer-dispatch-" + group));
        this.orderedRetryScheduler = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "snackmq-push-consumer-ordered-retry-" + group));
        this.retryScheduler = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "snackmq-push-consumer-retry-" + group));

        // A non-positive thread count disables the dedicated business pool (messages are then
        // consumed inline on the dispatcher thread).
        int bizThreads = config.getConsumeThreadNums();
        this.consumeExecutor = bizThreads > 0
                ? Executors.newFixedThreadPool(bizThreads, r -> new Thread(r, "snackmq-push-consumer-biz-" + group))
                : null;

        // A non-positive rate disables throttling entirely.
        double permitsPerSecond = config.getRateLimitPerSecond();
        this.rateLimiter = permitsPerSecond > 0 ? RateLimiter.create(permitsPerSecond) : null;
    }

    /**
     * Starts the underlying delegate; if subscriptions were registered before start,
     * the background schedulers are kicked off immediately.
     */
    @Override
    public void start() throws SnackMQClientException {
        delegate.start();
        if (subscriptions.isEmpty()) {
            return;
        }
        startSchedulers();
    }

    /**
     * Shuts the consumer down: stops all schedulers first so no new work is produced, waits a
     * bounded time for in-flight offset commits, closes the lazily created DLQ producer (if any)
     * and finally lets the delegate tear down its connection pool. No-op if never started.
     */
    @Override
    public void close() throws IOException {
        if (!delegate.isStarted()) {
            return;
        }

        log.info("Shutting down push consumer for group '{}'...", delegate.getConsumerConfig().getConsumerGroup());

        // 1. Stop every scheduler.
        ExecutorService[] schedulers = {
                pullScheduler, metadataScheduler, dispatchScheduler, orderedRetryScheduler, retryScheduler
        };
        for (ExecutorService scheduler : schedulers) {
            scheduler.shutdownNow();
        }
        if (consumeExecutor != null) {
            consumeExecutor.shutdownNow();
        }

        // 2. Give outstanding offset commits up to 5 seconds to finish.
        if (!pendingCommits.isEmpty()) {
            log.info("Waiting for {} pending offset commits to complete...", pendingCommits.size());
            try {
                CompletableFuture.allOf(pendingCommits.toArray(new CompletableFuture[0])).get(5, TimeUnit.SECONDS);
            } catch (Exception e) {
                log.warn("Failed to wait for all pending commits to complete.", e);
            }
        }

        // 3. Close the DLQ producer if one was ever created.
        Producer producer = this.dlqProducer;
        if (producer != null) {
            try {
                producer.close();
            } catch (Exception e) {
                log.error("Error closing DLQ producer.", e);
            }
        }

        // 4. Delegate closes the underlying connections.
        delegate.close();
    }

    /** Subscribes a single-message listener to {@code topic} with no filter expression. */
    @Override
    public void subscribe(String topic, MessageListener listener) {
        doSubscribe(topic, null, listener);
    }

    /** Subscribes a batch listener to {@code topic} with no filter expression. */
    @Override
    public void subscribe(String topic, BatchMessageListener listener) {
        doSubscribe(topic, null, listener);
    }

    /** Subscribes an ordered listener to {@code topic} with no filter expression. */
    @Override
    public void subscribe(String topic, OrderedMessageListener listener) {
        doSubscribe(topic, null, listener);
    }

    /** Subscribes a single-message listener to {@code topic} with a server-side filter. */
    @Override
    public void subscribe(String topic, String filterExpression, MessageListener listener) {
        doSubscribe(topic, filterExpression, listener);
    }

    /** Subscribes a batch listener to {@code topic} with a server-side filter. */
    @Override
    public void subscribe(String topic, String filterExpression, BatchMessageListener listener) {
        doSubscribe(topic, filterExpression, listener);
    }

    /** Subscribes an ordered listener to {@code topic} with a server-side filter. */
    @Override
    public void subscribe(String topic, String filterExpression, OrderedMessageListener listener) {
        doSubscribe(topic, filterExpression, listener);
    }

    /**
     * Registers (or replaces) the subscription for {@code topic}.
     * <p>
     * Fix: the schedulers are started only when the FIRST topic is newly added after start().
     * Previously the check was only {@code subscriptions.size() == 1}, so re-subscribing to the
     * same single topic on a running consumer called {@link #startSchedulers()} again and
     * scheduled duplicate periodic pull/metadata/dispatch tasks.
     *
     * @param topic            topic to subscribe to
     * @param filterExpression optional server-side filter (may be null)
     * @param listener         a MessageListener, BatchMessageListener or OrderedMessageListener
     */
    private void doSubscribe(String topic, String filterExpression, Object listener) {
        SubscriptionData previous = subscriptions.put(topic, new SubscriptionData(listener, filterExpression));
        log.info("Subscribed to topic '{}' with listener type: {}", topic, listener.getClass().getSimpleName());
        if (delegate.isStarted() && previous == null && subscriptions.size() == 1) {
            startSchedulers();
        }
    }

    /**
     * Schedules the three recurring background tasks: metadata refresh (5s), pull-loop
     * supervision (500ms) and dispatch of buffered messages to listeners.
     */
    private void startSchedulers() {
        ConsumerConfig config = delegate.getConsumerConfig();
        log.info("Starting schedulers for push consumer group: {}", config.getConsumerGroup());
        metadataScheduler.scheduleWithFixedDelay(this::updateAllTopicMetadata, 0, 5, TimeUnit.SECONDS);
        pullScheduler.scheduleWithFixedDelay(this::supervisePullLoops, 0, 500, TimeUnit.MILLISECONDS);
        // Dispatch starts slightly after the first pulls so the buffers have a chance to fill.
        long dispatchInitialDelay = config.getPullInitialDelayMillis() + 50;
        dispatchScheduler.scheduleWithFixedDelay(this::dispatchPulledMessages, dispatchInitialDelay, config.getPullIntervalMillis(), TimeUnit.MILLISECONDS);
    }

    /**
     * Periodic supervisor: for each subscribed topic, resolves its partitions and starts a
     * continuous pull loop for any partition that does not already have one running.
     */
    private void supervisePullLoops() {
        subscriptions.forEach((topic, subscription) -> delegate.fetchTopicMetadata(topic).whenComplete((topicInfo, ex) -> {
            if (ex != null || topicInfo == null || topicInfo.getPartitionAssignments() == null) {
                return;
            }
            for (Integer partitionId : topicInfo.getPartitionAssignments().keySet()) {
                TopicPartition tp = new TopicPartition(topic, partitionId);
                // putIfAbsent acts as a per-partition mutex: only the winner starts the loop.
                if (pullingPartitions.putIfAbsent(tp, PULLING_PLACEHOLDER) == null) {
                    log.trace("Supervisor starting a new pull loop for {}.", tp);
                    pullContinuouslyForPartition(tp, subscription.getFilterExpression());
                }
            }
        }));
    }

    /**
     * Core pull loop for one partition: fetch the committed offset, pull up to a buffer-bounded
     * batch from the partition leader, enqueue the result, then loop again immediately.
     * Back-pressure (full buffer, busy ordered processor, in-flight commit) is handled by
     * rescheduling this method after a short delay. On errors the loop exits and releases its
     * slot in {@code pullingPartitions} so the supervisor can restart it on its next pass.
     *
     * @param tp               partition to pull from
     * @param filterExpression optional server-side filter (may be null)
     */
    private void pullContinuouslyForPartition(TopicPartition tp, String filterExpression) {
        if (!delegate.isStarted()) {
            // Shutting down: release the pull slot and stop looping.
            pullingPartitions.remove(tp);
            return;
        }

        BlockingQueue<Message> buffer = pulledMessageCache.computeIfAbsent(tp,
                k -> new LinkedBlockingQueue<>(delegate.getConsumerConfig().getPullMessageBufferCapacity()));

        // Back-pressure: do not pull while the buffer is full or ordered consumption is behind.
        if (buffer.remainingCapacity() <= 0 || isOrderedProcessorBusy(tp)) {
            pullScheduler.schedule(() -> pullContinuouslyForPartition(tp, filterExpression), 50, TimeUnit.MILLISECONDS);
            return;
        }

        // Wait for any in-flight offset commit so the FETCH_OFFSET below observes the committed
        // value and we do not re-pull messages that were just consumed.
        if (inflightPartitionCommits.containsKey(tp)) {
            pullScheduler.schedule(() -> pullContinuouslyForPartition(tp, filterExpression), 10, TimeUnit.MILLISECONDS);
            return;
        }

        fetchOffset(tp).whenComplete((offset, ex) -> {
            if (!delegate.isStarted()) return;
            if (ex != null) {
                log.error("Failed to fetch offset for partition {}", tp, ex);
                pullingPartitions.remove(tp);
                return;
            }

            // NOTE(review): fetchOffset resolves to -1 both for "nothing committed yet" and for a
            // failed lookup, so a lookup failure silently restarts from offset 0 — confirm intended.
            long offsetToPull = offset + 1;
            int remainingCapacity = buffer.remainingCapacity();
            // Never request more than the buffer can currently hold.
            int maxPullMessages = Math.min(delegate.getConsumerConfig().getPullBatchSize(), remainingCapacity);
            if (maxPullMessages <= 0) {
                pullScheduler.schedule(() -> pullContinuouslyForPartition(tp, filterExpression), 50, TimeUnit.MILLISECONDS);
                return;
            }

            Message pullRequest = Message.builder()
                    .type(MessageType.PULL_REQUEST)
                    .topic(tp.getTopic())
                    .property("consumerGroup", delegate.getConsumerConfig().getConsumerGroup())
                    .property("partition", String.valueOf(tp.getPartitionId()))
                    .property("offset", String.valueOf(offsetToPull))
                    .property("maxMessages", String.valueOf(maxPullMessages))
                    .property("filterExpression", filterExpression)
                    .build();

            delegate.sendRequestToPartitionLeader(pullRequest, tp).whenComplete((response, pullEx) -> {
                if (!delegate.isStarted()) return;
                if (pullEx != null) {
                    log.error("Failed to pull messages for partition {}", tp, pullEx);
                    pullingPartitions.remove(tp);
                    return;
                }

                if (response.getStatus() == MessageStatus.SUCCESS && response.getMessages() != null && !response.getMessages().isEmpty()) {
                    try {
                        // Capacity was reserved via maxPullMessages and only this loop adds to the
                        // buffer, so addAll should not overflow; the catch below is defensive.
                        buffer.addAll(response.getMessages());
                        pullContinuouslyForPartition(tp, filterExpression);
                    } catch (IllegalStateException e) {
                        log.warn("Buffer is full for partition {}. Scheduling retry after delay.", tp, e);
                        pullScheduler.schedule(() -> pullContinuouslyForPartition(tp, filterExpression), 50, TimeUnit.MILLISECONDS);
                    }
                } else if (response.getStatus() == MessageStatus.EMPTY) {
                    // Nothing new yet: poll again after a short idle delay.
                    pullScheduler.schedule(() -> pullContinuouslyForPartition(tp, filterExpression), 100, TimeUnit.MILLISECONDS);
                } else {
                    log.warn("Pull request for partition {} returned with status: {} and error: {}", tp, response.getStatus(), response.getErrorMessage());
                    // Unexpected status: stop this loop; the supervisor will start a fresh one.
                    pullingPartitions.remove(tp);
                }
            });
        });
    }

    /** Returns true when an ordered processor exists for {@code tp} and still has work queued or running. */
    private boolean isOrderedProcessorBusy(TopicPartition tp) {
        PartitionProcessor processor = orderedProcessors.get(tp);
        if (processor == null) {
            return false;
        }
        return processor.isBusy();
    }

    /** Refreshes cached metadata for every subscribed topic; failures are logged and ignored. */
    private void updateAllTopicMetadata() {
        subscriptions.keySet().forEach(topic ->
                delegate.fetchTopicMetadata(topic).whenComplete((info, ex) -> {
                    if (ex != null) {
                        log.error("Failed to update metadata for topic '{}'", topic, ex);
                    }
                }));
    }

    /**
     * Periodic dispatcher: routes buffered messages from every partition to the subscribed
     * listener — the ordered path for OrderedMessageListener, the regular path otherwise.
     * Any unexpected error is contained here so the periodic task keeps running.
     */
    private void dispatchPulledMessages() {
        try {
            pulledMessageCache.forEach((tp, buffer) -> {
                SubscriptionData subData = subscriptions.get(tp.getTopic());
                if (subData == null || buffer.isEmpty()) {
                    return; // no subscription (anymore) or nothing buffered for this partition
                }
                Object listener = subData.getListener();
                if (listener instanceof OrderedMessageListener) {
                    dispatchToOrderedListener(tp, buffer, (OrderedMessageListener) listener);
                } else {
                    dispatchToRegularListener(buffer, listener);
                }
            });
        } catch (Exception e) {
            log.error("Error in dispatchPulledMessages task for group {}", delegate.getConsumerConfig().getConsumerGroup(), e);
        }
    }

    /** Drains one partition's buffer into its (lazily created) ordered processor and schedules it. */
    private void dispatchToOrderedListener(TopicPartition tp, BlockingQueue<Message> buffer, OrderedMessageListener listener) {
        PartitionProcessor processor = orderedProcessors.computeIfAbsent(tp,
                key -> new PartitionProcessor(key, listener, this.orderedRetryScheduler));
        List<Message> drained = new ArrayList<>();
        buffer.drainTo(drained);
        if (drained.isEmpty()) {
            return;
        }
        processor.addMessages(drained);
        processor.schedule();
    }

    /**
     * Dispatches buffered messages to a batch or single-message listener — on the business
     * thread pool when one is configured, otherwise inline on the dispatcher thread.
     */
    private void dispatchToRegularListener(BlockingQueue<Message> buffer, Object listener) {
        if (listener instanceof BatchMessageListener) {
            BatchMessageListener batchListener = (BatchMessageListener) listener;
            List<Message> batch = new ArrayList<>();
            buffer.drainTo(batch, delegate.getConsumerConfig().getConsumeBatchSize());
            if (batch.isEmpty()) {
                return;
            }
            if (consumeExecutor == null) {
                processBatchMessage(batch, batchListener);
            } else {
                consumeExecutor.submit(() -> processBatchMessage(batch, batchListener));
            }
        } else if (listener instanceof MessageListener) {
            MessageListener singleListener = (MessageListener) listener;
            for (Message next = buffer.poll(); next != null; next = buffer.poll()) {
                final Message current = next; // effectively-final copy for the lambda
                if (consumeExecutor == null) {
                    processSingleMessage(current, singleListener);
                } else {
                    consumeExecutor.submit(() -> processSingleMessage(current, singleListener));
                }
            }
        }
    }

    /**
     * Consumes one message: on SUCCESS its offset is committed asynchronously; on any other
     * result — or a throwing listener — a retry is scheduled via {@link #handleRetry}.
     */
    private void processSingleMessage(Message message, MessageListener listener) {
        if (rateLimiter != null) {
            rateLimiter.acquire();
        }
        try {
            if (listener.consume(message) == ConsumeResult.SUCCESS) {
                commitOffsetAsync(message);
                return;
            }
        } catch (Exception e) {
            log.error("Exception during message consumption for messageId: {}. Retrying...", message.getMessageId(), e);
        }
        handleRetry(message, m -> processSingleMessage(m, listener));
    }

    /**
     * Consumes a batch: on SUCCESS the offset of the LAST message is committed (covering the
     * whole batch); otherwise — or on a throwing listener — the entire batch is retried.
     */
    private void processBatchMessage(List<Message> messages, BatchMessageListener listener) {
        if (rateLimiter != null) {
            rateLimiter.acquire(messages.size());
        }
        try {
            if (listener.consume(messages) == ConsumeResult.SUCCESS) {
                commitOffsetAsync(messages.get(messages.size() - 1));
                return;
            }
        } catch (Exception e) {
            log.error("Exception during batch message consumption. Retrying...", e);
        }
        handleBatchRetry(messages, batch -> processBatchMessage(batch, listener));
    }

    /**
     * Consumes one message of an ordered partition and reports the outcome to the calling
     * {@link PartitionProcessor}, which pauses the partition on RETRY.
     * <p>
     * Returns SUCCESS when the message was consumed — or exhausted its retries and was routed
     * to the DLQ — and its offset was committed; returns RETRY otherwise. Offset commits are
     * joined synchronously on purpose: ordered consumption must not advance past an uncommitted
     * offset. A failed commit join now propagates to the processor (which reschedules) instead
     * of being miscounted as a consume failure.
     * <p>
     * Refactored: the retry/DLQ handling that was duplicated between the normal path and the
     * exception path is folded into a single code path.
     */
    private ConsumeResult doProcessOrderedMessage(Message message, OrderedMessageListener listener) {
        if (rateLimiter != null) {
            rateLimiter.acquire();
        }

        ConsumeResult result;
        try {
            result = listener.consume(message);
        } catch (Exception e) {
            // A throwing listener is treated exactly like an explicit RETRY result.
            log.error("Exception during ordered message consumption for messageId: {}. Retrying...", message.getMessageId(), e);
            result = ConsumeResult.RETRY;
        }

        if (result == ConsumeResult.SUCCESS) {
            commitOffsetAsync(message).join();
            retryCounts.remove(message.getMessageId());
            return ConsumeResult.SUCCESS;
        }

        int currentRetries = retryCounts.computeIfAbsent(message.getMessageId(), k -> new AtomicInteger(0)).incrementAndGet();
        int maxRetryTimes = delegate.getConsumerConfig().getMaxRetryTimes();
        if (currentRetries > maxRetryTimes) {
            log.warn("Message {} has exceeded max retry times ({}). Sending to DLQ.", message.getMessageId(), maxRetryTimes);
            sendToDLQ(message);
            // Commit past the poison message so the partition is not blocked forever.
            commitOffsetAsync(message).join();
            retryCounts.remove(message.getMessageId());
            return ConsumeResult.SUCCESS;
        }
        log.debug("Scheduling retry #{} for message {}", currentRetries, message.getMessageId());
        return ConsumeResult.RETRY;
    }

    /**
     * Bumps the retry counter for {@code message} and either schedules another attempt after
     * the configured delay, or — once the maximum is exceeded — routes the message to the DLQ
     * and commits its offset so the partition can move on.
     */
    private void handleRetry(Message message, java.util.function.Consumer<Message> retryAction) {
        long messageId = message.getMessageId();
        int attempt = retryCounts.computeIfAbsent(messageId, k -> new AtomicInteger(0)).incrementAndGet();
        int maxRetries = delegate.getConsumerConfig().getMaxRetryTimes();
        if (attempt > maxRetries) {
            log.warn("Message {} has exceeded max retry times ({}). Sending to DLQ.", messageId, maxRetries);
            sendToDLQ(message);
            commitOffsetAsync(message);
            retryCounts.remove(messageId);
            return;
        }
        log.debug("Scheduling retry #{} for message {}", attempt, messageId);
        // Retry delay is taken from configuration rather than hard-coded.
        retryScheduler.schedule(() -> retryAction.accept(message), delegate.getConsumerConfig().getRetryDelayMillis(), TimeUnit.MILLISECONDS);
    }

    /**
     * Batch variant of {@link #handleRetry}: the retry counter is keyed by the id of the FIRST
     * message in the batch, and once retries are exhausted every message of the batch goes to
     * the DLQ and the last message's offset is committed.
     */
    private void handleBatchRetry(List<Message> messages, java.util.function.Consumer<List<Message>> retryAction) {
        long batchId = messages.get(0).getMessageId();
        int attempt = retryCounts.computeIfAbsent(batchId, k -> new AtomicInteger(0)).incrementAndGet();
        if (attempt > delegate.getConsumerConfig().getMaxRetryTimes()) {
            log.warn("Message batch starting with {} has exceeded max retry times. Sending to DLQ.", batchId);
            for (Message message : messages) {
                sendToDLQ(message);
            }
            commitOffsetAsync(messages.get(messages.size() - 1));
            retryCounts.remove(batchId);
            return;
        }
        log.debug("Scheduling retry #{} for message batch starting with {}", attempt, batchId);
        // Retry delay is taken from configuration rather than hard-coded.
        retryScheduler.schedule(() -> retryAction.accept(messages), delegate.getConsumerConfig().getRetryDelayMillis(), TimeUnit.MILLISECONDS);
    }

    /**
     * Routes a poison message to its dead-letter topic ({@code DLQ_<originalTopic>}), lazily
     * creating and starting the DLQ producer on first use (double-checked locking on the
     * volatile {@code dlqProducer}). Delivery is best-effort: failures are logged and the
     * message is dropped.
     * <p>
     * Fix: the producer is published to {@code dlqProducer} only AFTER {@code start()} succeeds.
     * Previously the field was assigned before start, so a failed start left a half-initialized
     * producer behind — later calls skipped initialization and sent through an unstarted
     * producer, and close() tried to close it.
     */
    private void sendToDLQ(Message message) {
        Producer producer = this.dlqProducer;
        if (producer == null) {
            synchronized (dlqProducerLock) {
                producer = this.dlqProducer;
                if (producer == null) {
                    try {
                        Producer candidate = dlqProducerFactory.apply(this.delegate.getBootstrapBrokerAddress());
                        candidate.start();
                        // Publish only a fully started producer.
                        this.dlqProducer = candidate;
                        producer = candidate;
                    } catch (Exception e) {
                        log.error("Failed to create or start DLQ producer. Message will be lost: {}", message, e);
                        return;
                    }
                }
            }
        }
        try {
            Message dlqMessage = message.toBuilder()
                    .topic(DLQ_PREFIX + message.getTopic())
                    .property("originalTopic", message.getTopic())
                    .property("consumerGroup", delegate.getConsumerConfig().getConsumerGroup())
                    .build();
            producer.send(dlqMessage);
        } catch (Exception e) {
            log.error("Failed to send message to DLQ. Message will be lost: {}", message, e);
        }
    }

    /**
     * Asynchronously fetches the last committed offset for {@code tp} from its partition leader.
     * Resolves to -1 when no offset is available or the lookup fails; the caller then starts
     * pulling from offset 0.
     */
    private CompletableFuture<Long> fetchOffset(TopicPartition tp) {
        Message request = Message.builder()
                .type(MessageType.FETCH_OFFSET)
                .topic(tp.getTopic())
                .property("consumerGroup", delegate.getConsumerConfig().getConsumerGroup())
                .property("partition", String.valueOf(tp.getPartitionId()))
                .build();
        return delegate.sendRequestToPartitionLeader(request, tp).thenApply(response -> {
            String offsetValue = response.getProperty("offset");
            if (response.getStatus() == MessageStatus.SUCCESS && offsetValue != null) {
                return Long.parseLong(offsetValue);
            }
            log.warn("Failed to fetch offset for {} partition {}: {}", tp.getTopic(), tp.getPartitionId(), response.getErrorMessage());
            return -1L;
        });
    }

    /**
     * Asynchronously commits the offset carried by {@code message}. The resulting future is
     * tracked both globally (so close() can wait for it) and per-partition (so the pull loop
     * pauses while a commit for that partition is in flight); both registrations are removed
     * once the future completes.
     */
    private CompletableFuture<Void> commitOffsetAsync(Message message) {
        String partitionProp = message.getProperty("partition");
        if (partitionProp == null) {
            String errorMsg = "Cannot commit offset, message is missing 'partition' property: " + message;
            log.error(errorMsg);
            return ConsumerDelegate.failedFuture(new SnackMQClientException(errorMsg));
        }
        TopicPartition tp = new TopicPartition(message.getTopic(), Integer.parseInt(partitionProp));
        Message commitRequest = Message.builder()
                .type(MessageType.COMMIT_OFFSET)
                .topic(message.getTopic())
                .property("consumerGroup", this.delegate.getConsumerConfig().getConsumerGroup())
                .property("partition", partitionProp)
                .property("offset", message.getProperty("offset"))
                .build();
        CompletableFuture<Void> commitFuture = commitOffsetWithRetryAsync(commitRequest, tp, MAX_COMMIT_REDIRECT_RETRIES);
        inflightPartitionCommits.put(tp, commitFuture);
        pendingCommits.add(commitFuture);
        commitFuture.whenComplete((v, t) -> {
            // Two-arg remove: only clear the per-partition slot if it still holds this future.
            inflightPartitionCommits.remove(tp, commitFuture);
            pendingCommits.remove(commitFuture);
        });
        return commitFuture;
    }

    /**
     * Sends a COMMIT_OFFSET request to the metadata leader, following at most one redirect per
     * attempt and retrying other failures up to {@code retriesLeft} times with a 200ms delay.
     *
     * @param commitRequest the COMMIT_OFFSET message to send
     * @param tp            partition the commit belongs to (for logging/errors)
     * @param retriesLeft   remaining attempts; a failed future is returned when exhausted
     */
    private CompletableFuture<Void> commitOffsetWithRetryAsync(Message commitRequest, TopicPartition tp, int retriesLeft) {
        if (retriesLeft <= 0) {
            log.error("Failed to commit offset for {} after multiple retries. Giving up.", tp);
            return ConsumerDelegate.failedFuture(new SnackMQClientException("Failed to commit offset for " + tp + " after all retries."));
        }

        return delegate.sendRequestToMetadataLeader(commitRequest).thenCompose(response -> {
            if (response.getStatus() == MessageStatus.SUCCESS) {
                log.trace("Successfully committed offset for partition {}", tp);
                return CompletableFuture.completedFuture(null);
            }

            if (response.getStatus() == MessageStatus.REDIRECT && response.getProperty("redirectAddress") != null) {
                final String newLeaderAddress = response.getProperty("redirectAddress");
                log.warn("Metadata server redirected offset commit for {} to {}. This is unexpected. Retrying with new address...", tp, newLeaderAddress);
                try {
                    NettyClient newLeaderClient = delegate.getOrCreateClient(newLeaderAddress);
                    // NOTE(review): a failure after the redirect fails the whole commit instead of
                    // consuming one of the remaining retries — confirm this is intentional.
                    return newLeaderClient.sendRequest(delegate.addAuthToken(commitRequest))
                            .thenCompose(redirectedResponse -> {
                                if (redirectedResponse.getStatus() == MessageStatus.SUCCESS) {
                                    return CompletableFuture.completedFuture(null);
                                }
                                log.error("Failed to commit offset for {} on redirect: {}", tp, redirectedResponse.getErrorMessage());
                                return ConsumerDelegate.failedFuture(new SnackMQClientException("Failed to commit offset for " + tp + " even after redirect."));
                            });
                } catch (Exception e) {
                    log.error("Exception during offset commit redirect for {}", tp, e);
                    return ConsumerDelegate.failedFuture(e);
                }
            }

            // Any other status: retry after a fixed 200ms back-off, bridging the delayed
            // recursive attempt into the future returned to the caller.
            log.warn("Failed to commit offset for {}: {}. Retries left: {}. Retrying in 200ms...",
                    tp, response.getErrorMessage(), retriesLeft - 1);
            CompletableFuture<Void> retryFuture = new CompletableFuture<>();
            metadataScheduler.schedule(() -> commitOffsetWithRetryAsync(commitRequest, tp, retriesLeft - 1)
                    .whenComplete((v, ex) -> {
                        if (ex != null) {
                            retryFuture.completeExceptionally(ex);
                        } else {
                            retryFuture.complete(v);
                        }
                    }), 200, TimeUnit.MILLISECONDS);
            return retryFuture;
        });
    }

    /**
     * Serial, in-order processor for a single partition. Messages are queued and consumed one
     * at a time on the shared {@code consumeExecutor}; the {@code processing} CAS flag
     * guarantees at most one active run per partition, which preserves ordering.
     */
    private class PartitionProcessor implements Runnable {
        private final TopicPartition partition;
        private final OrderedMessageListener listener;
        // Unbounded FIFO of messages awaiting ordered consumption.
        private final BlockingQueue<Message> messageQueue = new LinkedBlockingQueue<>();
        // True while a run of this processor is active or scheduled on the executor.
        private final AtomicBoolean processing = new AtomicBoolean(false);
        private final ScheduledExecutorService retryScheduler;

        public PartitionProcessor(TopicPartition partition, OrderedMessageListener listener, ScheduledExecutorService retryScheduler) {
            this.partition = partition;
            this.listener = listener;
            this.retryScheduler = retryScheduler;
        }

        public void addMessages(List<Message> messages) {
            messageQueue.addAll(messages);
        }

        // Submits this processor to the business pool unless a run is already active. No-op
        // (with a warning) when consumeThreadNums is 0: ordered consumption needs the pool.
        public void schedule() {
            if (consumeExecutor != null && processing.compareAndSet(false, true)) {
                consumeExecutor.submit(this);
            } else if (consumeExecutor == null) {
                log.warn("Cannot schedule ordered consumption for partition {} because consumeThreadNums is 0.", partition);
            }
        }

        // Used by the pull loop for back-pressure: true while work is queued or running.
        public boolean isBusy() {
            return processing.get() || !messageQueue.isEmpty();
        }

        @Override
        public void run() {
            // Peek (not poll) so a failed message stays at the head and ordering is preserved.
            Message message = messageQueue.peek();
            if (message == null) {
                // NOTE(review): messages enqueued between this peek and set(false) are not picked
                // up until the dispatcher's next schedule() call — confirm that delay is acceptable.
                processing.set(false);
                return;
            }
            try {
                ConsumeResult result = doProcessOrderedMessage(message, listener);
                if (result == ConsumeResult.SUCCESS) {
                    messageQueue.poll();
                    if (messageQueue.isEmpty()) {
                        processing.set(false);
                    } else {
                        // More work: chain the next run without releasing the processing flag.
                        consumeExecutor.submit(this);
                    }
                } else { // RETRY: pause this partition and try again after the configured delay.
                    log.debug("Ordered consumption for partition {} is paused. Scheduling retry in {}ms.",
                            partition, delegate.getConsumerConfig().getOrderedRetryDelayMillis());
                    processing.set(false);
                    retryScheduler.schedule(this::schedule, delegate.getConsumerConfig().getOrderedRetryDelayMillis(), TimeUnit.MILLISECONDS);
                }
            } catch (Throwable t) {
                // Defensive: keep the partition alive even on unexpected errors.
                log.error("Unexpected error in PartitionProcessor for partition {}. Scheduling retry.", partition, t);
                processing.set(false);
                retryScheduler.schedule(this::schedule, delegate.getConsumerConfig().getOrderedRetryDelayMillis(), TimeUnit.MILLISECONDS);
            }
        }
    }

    // --- Pull-style API (not supported by this push-style consumer) ---

    /** Not supported in the push-style consumer; use DefaultPullConsumer instead. */
    @Override
    public List<Message> pullList(String topic, int maxMessages, long timeoutMs) throws SnackMQClientException {
        throw new UnsupportedOperationException("pullList is not supported in push-style consumer. Please use DefaultPullConsumer.");
    }

    /** Not supported in the push-style consumer; use DefaultPullConsumer instead. */
    @Override
    public List<Message> pullList(String topic, int partition, long offset, int maxMessages, long timeoutMs) throws SnackMQClientException {
        throw new UnsupportedOperationException("pullList is not supported in push-style consumer. Please use DefaultPullConsumer.");
    }

    /** Not supported in the push-style consumer; use DefaultPullConsumer instead. */
    @Override
    public PullResult pullResult(String topic, int maxMessages, long timeoutMs) throws SnackMQClientException {
        throw new UnsupportedOperationException("pullResult is not supported in push-style consumer. Please use DefaultPullConsumer.");
    }

    /** Not supported in the push-style consumer; use DefaultPullConsumer instead. */
    @Override
    public PullResult pullResult(String topic, int partition, long offset, int maxMessages, long timeoutMs) throws SnackMQClientException {
        throw new UnsupportedOperationException("pullResult is not supported in push-style consumer. Please use DefaultPullConsumer.");
    }

    /** Not supported: offsets are committed automatically after successful consumption. */
    @Override
    public void commitOffset(Message message) throws SnackMQClientException {
        throw new UnsupportedOperationException("commitOffset is not supported in push-style consumer. Please use DefaultPullConsumer.");
    }
}