// D:/Developments/lmworkspace/snackmq/cluster/src/main/java/com/aiwiown/snackmq/cluster/raft/SnackMQStateMachine.java

package com.aiwiown.snackmq.cluster.raft;

import com.aiwiown.snackmq.cluster.metadata.MetadataManager;
import com.aiwiown.snackmq.cluster.metadata.TopicMetadata;
import com.aiwiown.snackmq.cluster.raft.entity.SnackMQRaftLogEntry;
import com.aiwiown.snackmq.cluster.routing.Exchange;
import com.aiwiown.snackmq.common.message.Message;
import com.aiwiown.snackmq.common.metrics.OffsetQueryable;
import com.aiwiown.snackmq.common.protocol.TopicPartition;
import com.aiwiown.snackmq.storage.StorageService;
import com.aiwiown.snackmq.storage.exception.PartitionNotFoundException;
import com.aiwiown.snackmq.storage.partition.LogPartition;
import com.aiwiown.snackmq.storage.service.PartitionedStorageService;
import com.aiwiown.snackmq.storage.timer.TimerMessageStore;
import com.alipay.sofa.jraft.Closure;
import com.alipay.sofa.jraft.Iterator;
import com.alipay.sofa.jraft.Status;
import com.alipay.sofa.jraft.core.StateMachineAdapter;
import com.alipay.sofa.jraft.error.RaftError;
import com.alipay.sofa.jraft.storage.snapshot.SnapshotReader;
import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter;
import com.google.gson.Gson;
import lombok.extern.slf4j.Slf4j;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;

/**
 * Raft state machine for SnackMQ message data.
 *
 * <p>Extends {@link StateMachineAdapter} and serves offset queries via
 * {@link OffsetQueryable}. Responsibilities:
 * <ul>
 *   <li>Apply replicated log entries (PRODUCE, COMMIT_OFFSET, CLEAN_TOPIC_DATA)
 *       to local storage and in-memory state.</li>
 *   <li>Track the last applied index/time so recovery progress can be observed.</li>
 *   <li>Maintain consumer offsets and partition high watermarks, and persist
 *       them to Raft snapshots.</li>
 * </ul>
 *
 * <p>This version fixes a deadlock during recovery that was caused by a faulty
 * readiness check (see {@link #isRaftLogReplayComplete()}).
 *
 * <p>Thread-safety: JRaft invokes {@link #onApply} from a single apply thread;
 * the shared maps are concurrent so query methods may be called from other threads.
 */
@Slf4j
public class SnackMQStateMachine extends StateMachineAdapter implements OffsetQueryable {

    private final StorageService storageService;
    private final MetadataManager metadataManager;
    private final String currentBrokerAddress;
    /** Consumer offsets keyed by "consumerGroup:topic:partitionId". */
    private final ConcurrentMap<String, AtomicLong> offsetTable = new ConcurrentHashMap<>();
    /** Partition high watermarks keyed by "topic:partitionId". */
    private final ConcurrentMap<String, AtomicLong> partitionHighWatermarks = new ConcurrentHashMap<>();
    /** Partitions hosted by this broker; used to resync high watermarks after recovery. */
    private final Map<TopicPartition, LogPartition> activePartitions;
    private static final String SNAPSHOT_FILE_NAME = "data_statemachine.snapshot";
    private final Gson gson = new Gson();
    private final TimerMessageStore timerMessageStore;
    /** True while the state machine is still replaying Raft logs after startup. */
    private volatile boolean isRecovering = true;
    private volatile long recoveryStartTime = 0;
    private volatile long lastAppliedIndex = 0;
    private volatile long lastAppliedTime = 0;

    /**
     * Creates the state machine and puts it into recovery mode.
     *
     * @param storageService       physical message storage
     * @param metadataManager      cluster metadata (topics, partition assignments, exchanges)
     * @param currentBrokerAddress address identifying this broker in assignment checks
     * @param raftDataPath         Raft data directory (currently unused; kept for API compatibility)
     * @param activePartitions     partitions hosted by this broker
     * @param timerMessageStore    store for delayed (timer) messages
     */
    public SnackMQStateMachine(StorageService storageService, MetadataManager metadataManager,
                               String currentBrokerAddress, String raftDataPath,
                               Map<TopicPartition, LogPartition> activePartitions,
                               TimerMessageStore timerMessageStore) {
        this.storageService = storageService;
        this.metadataManager = metadataManager;
        this.currentBrokerAddress = currentBrokerAddress;
        this.activePartitions = activePartitions;
        this.timerMessageStore = timerMessageStore;
        this.isRecovering = true;
        this.recoveryStartTime = System.currentTimeMillis();
        log.info("SnackMQStateMachine 初始化，进入恢复模式");
    }

    /**
     * Applies committed log entries. Each entry is deserialized and dispatched;
     * failures are reported to the closure (if any) without stopping the loop.
     */
    @Override
    public void onApply(final Iterator iter) {
        while (iter.hasNext()) {
            final Closure closure = iter.done();
            try {
                final long currentIndex = iter.getIndex();
                this.lastAppliedIndex = currentIndex;
                this.lastAppliedTime = System.currentTimeMillis();

                // 【修复】Copy exactly the remaining bytes instead of calling
                // ByteBuffer.array(): array() exposes the whole backing array
                // (wrong for sliced buffers) and throws for read-only/direct buffers.
                final ByteBuffer data = iter.getData();
                final byte[] payload = new byte[data.remaining()];
                data.get(payload);

                SnackMQRaftLogEntry logEntry = SnackMQRaftLogEntry.fromBytes(payload);
                if (logEntry == null) {
                    throw new IllegalStateException("Failed to deserialize log entry at index " + currentIndex);
                }
                applyLogEntry(logEntry, currentIndex);
                if (closure != null) {
                    closure.run(Status.OK());
                }
            } catch (Exception e) {
                log.error("Failed to apply log entry at index {}", iter.getIndex(), e);
                if (closure != null) {
                    closure.run(new Status(RaftError.EINTERNAL, "State machine apply failed."));
                }
            } finally {
                // Always advance, even on failure, so one bad entry cannot stall the loop.
                iter.next();
            }
        }
    }

    /**
     * Dispatches a log entry to the handler for its operation type.
     *
     * @param logEntry        deserialized Raft log entry
     * @param commitLogOffset the entry's index, used as the global CommitLog offset
     * @throws IOException if storage access fails
     */
    private void applyLogEntry(SnackMQRaftLogEntry logEntry, long commitLogOffset) throws IOException {
        switch (logEntry.getOperationType()) {
            case PRODUCE:
                applyProduceOperation(logEntry, commitLogOffset);
                break;
            case COMMIT_OFFSET:
                applyCommitOffsetOperation(logEntry);
                break;
            case CLEAN_TOPIC_DATA:
                applyCleanTopicDataOperation(logEntry);
                break;
            default:
                log.warn("Unsupported operation type in data state machine: {}", logEntry.getOperationType());
                break;
        }
    }

    /**
     * Removes all in-memory state and stored data for a deleted topic.
     * Runs on every replica so the replicated state stays consistent.
     */
    private void applyCleanTopicDataOperation(SnackMQRaftLogEntry logEntry) {
        Message cleanupMessage = logEntry.getData(Message.class);
        if (cleanupMessage == null || cleanupMessage.getTopic() == null) {
            log.error("Invalid CLEAN_TOPIC_DATA operation: log entry is null or has no topic.");
            return;
        }
        String topicToClean = cleanupMessage.getTopic();
        log.info("Applying data cleanup for topic '{}'.", topicToClean);
        // High-watermark keys are "topic:partitionId".
        this.partitionHighWatermarks.keySet().removeIf(key -> key.startsWith(topicToClean + ":"));
        // 【修复】Offset keys are "consumerGroup:topic:partitionId"; compare the topic
        // segment exactly (same scheme as the cleanup in onSnapshotSave) instead of a
        // substring match, which could remove offsets of unrelated topics.
        this.offsetTable.keySet().removeIf(key -> {
            String[] parts = key.split(":", 3);
            return parts.length == 3 && parts[1].equals(topicToClean);
        });
        this.storageService.deleteTopic(topicToClean);
        log.info("Data cleanup for topic '{}' completed.", topicToClean);
    }

    /**
     * Applies a PRODUCE entry, routing to batch or single-message handling.
     *
     * @throws IllegalStateException if the entry carries no message payload
     */
    private void applyProduceOperation(SnackMQRaftLogEntry logEntry, long commitLogOffset) throws IOException {
        Message messageToStore = logEntry.getData(Message.class);
        if (messageToStore == null) {
            throw new IllegalStateException("PRODUCE operation log entry has null data.");
        }
        if (messageToStore.isBatch()) {
            applyMessageBatchToStorage(messageToStore, commitLogOffset);
        } else {
            applySingleMessage(messageToStore, commitLogOffset);
        }
    }

    /**
     * Applies a batch of messages: delayed messages go to the timer store; the rest
     * update the replicated high watermark unconditionally, and are physically
     * written only on the assigned leader (side effects are conditional).
     */
    private void applyMessageBatchToStorage(Message batchContainer, long commitLogOffset) throws IOException {
        List<Message> messages = batchContainer.getMessages();
        if (messages == null || messages.isEmpty()) {
            return;
        }

        // Split out delayed messages; they are handled by the timer store.
        List<Message> normalMessages = new ArrayList<>();
        for (Message message : messages) {
            if (message.getDelayTimeInSeconds() > 0) {
                this.timerMessageStore.enqueue(message, commitLogOffset);
            } else {
                normalMessages.add(message);
            }
        }

        if (normalMessages.isEmpty()) {
            return;
        }

        // Fall back to the first message's topic when the container has none.
        String topic = batchContainer.getTopic();
        if (topic == null && !normalMessages.isEmpty()) {
            topic = normalMessages.get(0).getTopic();
            if (topic == null) {
                log.error("CRITICAL: Batch container and its messages have null topic. Cannot store. Batch size: {}", normalMessages.size());
                return;
            }
        }
        int partitionId = Integer.parseInt(batchContainer.getProperty("partition", "0"));
        TopicPartition topicPartition = new TopicPartition(topic, partitionId);

        // 1. State update: executed unconditionally so every Raft node stays consistent.
        String highWatermarkKey = topic + ":" + partitionId;
        AtomicLong hwm = this.partitionHighWatermarks.computeIfAbsent(highWatermarkKey, k -> new AtomicLong(-1L));
        long startOffset = hwm.get() + 1;
        long endOffset = startOffset + normalMessages.size() - 1;
        hwm.set(endOffset); // advance the high watermark immediately

        // 2. Side-effect gate: only the assigned leader writes to disk.
        boolean isAssignedToCurrentBroker = this.metadataManager.isPartitionAssignedToBroker(
                topicPartition, this.currentBrokerAddress);

        if (!isAssignedToCurrentBroker) {
            log.debug("Broker {} is not the assigned leader for partition {}, skipping physical storage write.",
                    this.currentBrokerAddress, topicPartition);
            return;
        }

        // 3. Idempotency check: prevents duplicate writes during log replay.
        long nextStorageOffset = this.storageService.getNextOffset(topic, partitionId);
        if (startOffset < nextStorageOffset) {
            log.warn("Idempotency check failed for partition {}. State machine wants to write from offset {}, but storage is already at {}. " +
                            "This is likely a log replay after restart. Skipping physical write to prevent duplication.",
                    topicPartition, startOffset, nextStorageOffset);
            return;
        }

        // Skip writes while this specific partition is still recovering (local-only check).
        if (((PartitionedStorageService) this.storageService).isPartitionRecovering(topicPartition)) {
            log.warn("Skipping batch write to partition {} because the specific partition is still recovering.", topicPartition);
            return;
        }

        // Special routing path: exchange batches are fanned out per message.
        if ("__EXCHANGE_ROUTING__".equals(topic)) {
            log.info("Processing exchange routing batch with {} messages", normalMessages.size());
            for (Message message : normalMessages) {
                applySingleMessage(message, commitLogOffset);
            }
            return;
        }

        // 4. Perform the physical write.
        this.storageService.appendBatch(topicPartition, normalMessages, startOffset, commitLogOffset);
    }

    /**
     * Applies one message: delayed messages are enqueued to the timer store;
     * exchange-bound messages are routed; everything else is stored directly.
     */
    private void applySingleMessage(Message message, long commitLogOffset) throws IOException {
        if (message.getDelayTimeInSeconds() > 0) {
            this.timerMessageStore.enqueue(message, commitLogOffset);
        } else {
            String exchangeName = message.getProperty("exchangeName");
            if (exchangeName != null && !exchangeName.isEmpty()) {
                applyExchangeRouting(message, commitLogOffset);
            } else {
                applyMessageToStorage(message, commitLogOffset);
            }
        }
    }

    /**
     * Routes a message through its exchange to all matching queues, writing one
     * copy per target queue. A failure on one queue does not abort the others.
     */
    private void applyExchangeRouting(Message message, long commitLogOffset) throws IOException {
        String exchangeName = message.getProperty("exchangeName");
        if (exchangeName == null || exchangeName.isEmpty()) {
            applyMessageToStorage(message, commitLogOffset);
            return;
        }

        Exchange exchange = this.metadataManager.getExchange(exchangeName);
        if (exchange == null) {
            log.warn("Exchange '{}' not found, message will be dropped: {}", exchangeName, message.getMessageId());
            return;
        }

        List<String> targetQueues = exchange.route(message);
        if (targetQueues.isEmpty()) {
            log.debug("No queues matched for message sent to exchange '{}' with key '{}'", exchangeName, message.getKey());
            return;
        }

        log.info("Routing message {} from exchange '{}' to {} queues: {}",
                message.getMessageId(), exchangeName, targetQueues.size(), targetQueues);

        // Deliver one copy of the message to each matched queue.
        for (String queueName : targetQueues) {
            try {
                // Build a per-queue copy carrying the routing provenance.
                Message routedMessage = message.toBuilder()
                        .topic(queueName) // the target queue becomes the topic
                        .property("originalExchange", exchangeName) // keep the source exchange
                        .property("routedFromExchange", "true") // mark as routed
                        .build();

                applyMessageToStorage(routedMessage, commitLogOffset);
            } catch (Exception e) {
                log.error("Failed to route message {} to queue '{}'", message.getMessageId(), queueName, e);
                // Keep routing to the remaining queues; one failure must not abort the fan-out.
            }
        }
    }

    /**
     * Applies a COMMIT_OFFSET entry by recording the consumer offset in memory.
     *
     * @param logEntry Raft log entry whose payload carries consumerGroup/partition/offset properties
     * @throws IllegalStateException if the entry carries no payload
     */
    private void applyCommitOffsetOperation(SnackMQRaftLogEntry logEntry) {
        Message offsetMessage = logEntry.getData(Message.class);
        if (offsetMessage == null) {
            throw new IllegalStateException("COMMIT_OFFSET operation log entry has null data.");
        }

        // Extract the offset-commit fields from message properties.
        // NOTE(review): missing/non-numeric properties throw NumberFormatException,
        // which onApply reports as an apply failure — presumed intentional.
        String consumerGroup = offsetMessage.getProperty("consumerGroup");
        String topic = offsetMessage.getTopic();
        int partitionId = Integer.parseInt(offsetMessage.getProperty("partition"));
        long offset = Long.parseLong(offsetMessage.getProperty("offset"));

        // Record the offset in the in-memory offsetTable.
        commitOffset(consumerGroup, topic, partitionId, offset);
    }

    /**
     * Writes a single message to storage. The high watermark is advanced on every
     * replica; the physical write happens only on the assigned leader and only if
     * the idempotency and recovery checks pass.
     *
     * @param message         message to store
     * @param commitLogOffset physical offset in the global CommitLog, attached as a property
     * @throws IOException if the storage append fails
     */
    private void applyMessageToStorage(Message message, long commitLogOffset) throws IOException {
        String topic = message.getTopic();
        int partitionId = Integer.parseInt(message.getProperty("partition", "0"));
        TopicPartition topicPartition = new TopicPartition(topic, partitionId);

        // 1. State update: unconditional, keeps all Raft nodes consistent.
        String highWatermarkKey = topic + ":" + partitionId;
        AtomicLong hwm = this.partitionHighWatermarks.computeIfAbsent(highWatermarkKey, k -> new AtomicLong(-1L));
        long offset = hwm.incrementAndGet();

        // 2. Side-effect gate: only the assigned leader writes to disk.
        boolean isAssignedToCurrentBroker = this.metadataManager.isPartitionAssignedToBroker(
                topicPartition, this.currentBrokerAddress);

        if (!isAssignedToCurrentBroker) {
            log.debug("Broker {} is not the assigned leader for partition {}, skipping physical storage write.",
                    this.currentBrokerAddress, topicPartition);
            return;
        }

        // 3. Idempotency check against storage to avoid replay duplication.
        long nextStorageOffset = this.storageService.getNextOffset(topic, partitionId);
        if (offset < nextStorageOffset) {
            log.warn("Idempotency check failed for partition {}. State machine wants to write offset {}, but storage is already at {}. " +
                            "Skipping physical write.",
                    topicPartition, offset, nextStorageOffset);
            return;
        }

        // Skip writes while this specific partition is still recovering (local-only check).
        if (((PartitionedStorageService) this.storageService).isPartitionRecovering(topicPartition)) {
            log.warn("Skipping single message write to partition {} because the specific partition is still recovering.", topicPartition);
            return;
        }

        // 4. Perform the physical write, stamping physical and logical offsets.
        Message messageWithPhysicalOffset = message.toBuilder()
                .property(Message.PROPERTY_PHYSICAL_OFFSET, String.valueOf(commitLogOffset))
                .property("offset", String.valueOf(offset))
                .property("partition", String.valueOf(partitionId))
                .build();
        try {
            this.storageService.append(topicPartition, messageWithPhysicalOffset, offset);
        } catch (PartitionNotFoundException e) {
            // Tolerated during failover: metadata may briefly disagree with local storage.
            log.error("CRITICAL: Broker {} failed to find partition {}! Metadata might be inconsistent.",
                    this.currentBrokerAddress, topicPartition, e);
            log.warn("Continuing despite partition not found error during failover scenario.");
        }
    }


    /**
     * Called when the state machine shuts down.
     */
    @Override
    public void onShutdown() {
        log.info("SnackMQStateMachine is shutting down");
    }

    /**
     * Container for all state persisted in a snapshot (serialized as JSON via Gson).
     */
    private static class StateMachineSnapshot {
        Map<String, Long> consumerOffsets;
        Map<String, Long> highWatermarks;
    }

    /**
     * Saves the snapshot: cleans up state for deleted topics, then persists the
     * offset table and high watermarks as a JSON file.
     *
     * @param writer snapshot writer
     * @param done   completion callback
     */
    @Override
    public void onSnapshotSave(SnapshotWriter writer, Closure done) {
        // Clean stale state before persisting so the snapshot omits deleted topics.
        if (metadataManager != null) {
            try {
                log.info("Cleaning up state before saving snapshot...");
                final Set<String> activeTopics = this.metadataManager.getAllTopics().stream()
                        .map(TopicMetadata::getTopicName)
                        .collect(Collectors.toSet());

                // Drop high watermarks of deleted topics (keys are "topic:partitionId").
                this.partitionHighWatermarks.keySet().removeIf(key -> {
                    int lastColon = key.lastIndexOf(':');
                    if (lastColon == -1) return true; // drop malformed keys
                    return !activeTopics.contains(key.substring(0, lastColon));
                });

                // Drop consumer offsets of deleted topics (keys are "consumerGroup:topic:partitionId").
                this.offsetTable.keySet().removeIf(key -> {
                    String[] parts = key.split(":", 3);
                    return parts.length == 3 && !activeTopics.contains(parts[1]);
                });
                log.info("State cleanup complete.");
            } catch (Exception e) {
                log.error("Error during pre-snapshot state cleanup. Snapshot may contain stale data.", e);
            }
        }

        // Capture a point-in-time copy of both maps as plain longs.
        StateMachineSnapshot snapshot = new StateMachineSnapshot();
        snapshot.consumerOffsets = this.offsetTable.entrySet().stream()
                .collect(Collectors.toConcurrentMap(Map.Entry::getKey, e -> e.getValue().get()));
        snapshot.highWatermarks = this.partitionHighWatermarks.entrySet().stream()
                .collect(Collectors.toConcurrentMap(Map.Entry::getKey, e -> e.getValue().get()));

        String snapshotPath = writer.getPath();
        File snapshotFile = new File(snapshotPath, SNAPSHOT_FILE_NAME);

        try {
            String jsonState = gson.toJson(snapshot);
            // 【修复】Pin the charset: the no-arg getBytes() uses the platform default,
            // which would make snapshots non-portable across machines.
            Files.write(snapshotFile.toPath(), jsonState.getBytes(StandardCharsets.UTF_8));
            log.info("Successfully saved data state machine snapshot with {} consumer offsets and {} high watermarks.",
                    snapshot.consumerOffsets.size(), snapshot.highWatermarks.size());

            if (writer.addFile(SNAPSHOT_FILE_NAME)) {
                done.run(Status.OK());
            } else {
                log.error("Failed to add file to snapshot writer: {}", snapshotFile.getAbsolutePath());
                done.run(new Status(RaftError.EIO, "Failed to add file to snapshot"));
            }
        } catch (IOException e) {
            log.error("Failed to save offset table snapshot", e);
            done.run(new Status(RaftError.EIO, "Failed to save snapshot"));
        }
    }

    /**
     * Loads a snapshot, restoring the offset table and high watermarks.
     *
     * @param reader snapshot reader
     * @return true on success or when no snapshot file exists; false on failure
     */
    @Override
    public boolean onSnapshotLoad(SnapshotReader reader) {
        // 【修复】Call listFiles() once instead of twice.
        final Set<String> files = reader.listFiles();
        if (files == null || !files.contains(SNAPSHOT_FILE_NAME)) {
            log.warn("Snapshot file {} not found in reader.", SNAPSHOT_FILE_NAME);
            return true;
        }
        String snapshotPath = reader.getPath();
        File snapshotFile = new File(snapshotPath, SNAPSHOT_FILE_NAME);

        try {
            String jsonState = new String(Files.readAllBytes(snapshotFile.toPath()), StandardCharsets.UTF_8);
            StateMachineSnapshot snapshot = gson.fromJson(jsonState, StateMachineSnapshot.class);

            // 【修复】Gson returns null for an empty/blank file; the old code would NPE here.
            if (snapshot == null) {
                log.error("Snapshot file {} is empty or malformed.", snapshotFile.getAbsolutePath());
                return false;
            }

            if (snapshot.consumerOffsets != null) {
                offsetTable.clear();
                snapshot.consumerOffsets.forEach((key, value) -> offsetTable.put(key, new AtomicLong(value)));
            }

            if (snapshot.highWatermarks != null) {
                partitionHighWatermarks.clear();
                snapshot.highWatermarks.forEach((key, value) -> partitionHighWatermarks.put(key, new AtomicLong(value)));
            }

            log.info("Successfully loaded data state machine snapshot with {} consumer offsets and {} high watermarks.",
                    offsetTable.size(), partitionHighWatermarks.size());

            return true;
        } catch (Exception e) {
            // 【修复】Catch Exception, not only IOException: Gson throws JsonSyntaxException
            // on corrupt JSON, and a load failure must be reported to JRaft (return false)
            // instead of crashing the apply thread.
            log.error("Failed to load offset table snapshot", e);
            return false;
        }
    }

    /**
     * Records a consumer group's committed offset.
     *
     * @param consumerGroup consumer group
     * @param topic         topic
     * @param partitionId   partition id
     * @param offset        committed offset
     */
    public void commitOffset(String consumerGroup, String topic, int partitionId, long offset) {
        String key = buildOffsetKey(consumerGroup, topic, String.valueOf(partitionId));
        offsetTable.computeIfAbsent(key, k -> new AtomicLong(0)).set(offset);
        log.debug("Committed offset for {}: {}", key, offset);
    }

    /**
     * Returns the committed offset for a consumer group, or -1 if none exists.
     *
     * @param consumerGroup consumer group
     * @param topic         topic
     * @param partitionId   partition id
     * @return the committed offset, or -1 when absent
     */
    @Override
    public long fetchOffset(String consumerGroup, String topic, int partitionId) {
        String key = buildOffsetKey(consumerGroup, topic, String.valueOf(partitionId));
        // 【修复】Avoid allocating a throwaway AtomicLong on every cache miss
        // (getOrDefault constructed the default even when the key was present).
        AtomicLong stored = offsetTable.get(key);
        return stored != null ? stored.get() : -1L;
    }

    /**
     * Joins key parts with ':' to build an offset-table key.
     *
     * @param parts key segments
     * @return the joined key
     */
    private String buildOffsetKey(String... parts) {
        return String.join(":", parts);
    }

    /**
     * Exposes the storage service so external components can reach the storage layer.
     *
     * @return the storage service instance
     */
    public StorageService getStorageService() {
        return this.storageService;
    }

    /**
     * Returns all consumer-group offsets, for console display.
     *
     * @return nested map: { consumerGroup -> { "topic:partition" -> offset } }
     */
    public Map<String, Map<String, Long>> getAllConsumerGroupOffsets() {
        Map<String, Map<String, Long>> result = new ConcurrentHashMap<>();
        offsetTable.forEach((key, value) -> {
            // Key format is "consumerGroup:topic:partitionId".
            String[] parts = key.split(":", 3);
            if (parts.length == 3) {
                String group = parts[0];
                String topicPartitionKey = parts[1] + ":" + parts[2];
                result.computeIfAbsent(group, k -> new ConcurrentHashMap<>()).put(topicPartitionKey, value.get());
            }
        });
        return result;
    }

    /**
     * Returns all partition high watermarks, used to compute consumer lag.
     *
     * @return map: { "topic:partition" -> highWatermark }
     */
    public Map<String, Long> getAllPartitionHighWatermarks() {
        return this.partitionHighWatermarks.entrySet().stream()
                .collect(Collectors.toConcurrentMap(Map.Entry::getKey, e -> e.getValue().get()));
    }

    /**
     * Computes the cluster-wide total message count from the replicated,
     * globally consistent high-watermark map (not local storage), counting
     * only topics that still exist.
     *
     * @return total number of messages across the cluster
     */
    public long calculateTotalClusterMessages() {
        if (metadataManager == null) {
            log.warn("MetadataManager is null, cannot accurately calculate total cluster messages. Returning 0.");
            return 0;
        }

        // Active topics only: deleted topics must not be counted.
        final Set<String> activeTopics = this.metadataManager.getAllTopics().stream()
                .map(TopicMetadata::getTopicName)
                .collect(Collectors.toSet());

        if (activeTopics.isEmpty()) {
            return 0;
        }

        // Sum high watermarks of partitions whose topic is still active.
        return this.partitionHighWatermarks.entrySet().stream()
                .filter(entry -> {
                    // Extract the topic name from key "topic:partitionId".
                    String key = entry.getKey();
                    int lastColon = key.lastIndexOf(':');
                    if (lastColon == -1) {
                        return false; // ignore malformed keys
                    }
                    String topicName = key.substring(0, lastColon);
                    return activeTopics.contains(topicName);
                })
                .mapToLong(entry -> entry.getValue().get() + 1) // offsets start at 0, so count = max offset + 1
                .sum();
    }

    /**
     * Ends recovery mode, enabling message writes, and resyncs high watermarks
     * from local partitions so the console's Total Messages stays correct.
     */
    public void endRecovery() {
        if (this.isRecovering) {
            this.isRecovering = false;
            long recoveryDuration = System.currentTimeMillis() - this.recoveryStartTime;
            log.info("SnackMQStateMachine 恢复完成，耗时 {}ms，已启用消息写入", recoveryDuration);

            // Sync high watermarks from the hosted partitions' storage state.
            for (Map.Entry<TopicPartition, LogPartition> entry : activePartitions.entrySet()) {
                TopicPartition tp = entry.getKey();
                LogPartition partition = entry.getValue();
                long lastOffset = partition.getNextOffset() - 1;
                String key = tp.getTopic() + ":" + tp.getPartitionId();
                partitionHighWatermarks.put(key, new AtomicLong(lastOffset));
                log.info("同步高水位线: {} = {}", key, lastOffset);
            }
        }
    }

    /**
     * Reports whether the state machine is still recovering.
     *
     * @return true while recovery is in progress
     */
    public boolean isRecovering() {
        return this.isRecovering;
    }

    /**
     * Checks whether Raft log replay has completed. Replay is considered done when
     * no new entry has been applied for 5 seconds; on a fresh start with no logs at
     * all, a 10-second grace period after initialization counts as complete. The old
     * logic waited forever in the no-logs case, hanging broker startup.
     *
     * @return true if replay is considered complete
     */
    public boolean isRaftLogReplayComplete() {
        long currentTime = System.currentTimeMillis();

        // Case 1: at least one entry has been applied — done if quiet for >5s.
        if (this.lastAppliedTime > 0) {
            long timeSinceLastApply = currentTime - this.lastAppliedTime;
            return timeSinceLastApply > 5000; // 5-second quiescence window
        }

        // Case 2: nothing was ever applied (e.g. first start). After a 10-second
        // grace period since initialization, assume there are no logs to replay.
        if (this.recoveryStartTime > 0) {
            long timeSinceInit = currentTime - this.recoveryStartTime;
            return timeSinceInit > 10000;
        }

        // Default: start time unset — treat replay as not complete.
        return false;
    }

    /**
     * Returns a human-readable summary of the Raft log replay status.
     *
     * @return replay status string
     */
    public String getRaftLogReplayStatus() {
        return String.format("lastAppliedIndex=%d, lastAppliedTime=%d, timeSinceLastApply=%dms",
                this.lastAppliedIndex, this.lastAppliedTime,
                this.lastAppliedTime > 0 ? System.currentTimeMillis() - this.lastAppliedTime : 0);
    }
}