// D:/Developments/lmworkspace/snackmq/storage/src/main/java/com/aiwiown/snackmq/storage/partition/LogPartition.java

package com.aiwiown.snackmq.storage.partition;

import com.aiwiown.snackmq.common.message.Message;
import com.aiwiown.snackmq.common.protocol.TopicPartition;
import com.aiwiown.snackmq.storage.segment.LogSegment;
import com.aiwiown.snackmq.storage.segment.SegmentFullException;
import com.aiwiown.snackmq.storage.service.PartitionedStorageService;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

/**
 * Represents a single partition of a topic.
 *
 * <p>A partition is an ordered sequence of messages persisted as a series of
 * {@link LogSegment} files. This class owns the segment map, assigns logical
 * offsets ({@link #nextOffset} is the single source of truth), and rolls to a
 * new segment when the active one fills up.
 *
 * <p>Thread-safety: all public entry points are {@code synchronized} on this
 * instance; {@code activeSegment} and {@code isRecovering} are additionally
 * {@code volatile} so unsynchronized readers observe fresh values.
 */
@Slf4j
@Getter
public class LogPartition implements Closeable {

    private final String topic;
    private final int partitionId;
    private final String partitionPath;
    private final long segmentSize;
    private final int indexInterval;
    // Recovery strategy flag: when true, each segment rebuilds its state by a full log scan.
    private final boolean forceFullLogRecovery;

    // Segments keyed by base (first) logical offset; lastEntry() is always the newest segment.
    private final NavigableMap<Long, LogSegment> segments = new ConcurrentSkipListMap<>();
    private volatile LogSegment activeSegment;
    // While true, all appends are rejected (recovery in progress).
    private volatile boolean isRecovering = false;
    private PartitionedStorageService storageService = null;

    // This is the single source of truth for the next offset.
    // All modifications are protected by synchronized methods.
    private long nextOffset = 0;


    /**
     * Creates the partition object and ensures its on-disk directory exists.
     * No segments are opened here; call {@link #initializeAndRecover()} afterwards.
     *
     * @param storagePath          root storage directory
     * @param topic                topic name
     * @param partitionId          partition number within the topic
     * @param segmentSize          maximum size of a single segment file, in bytes
     * @param indexInterval        sparse-index interval handed to each segment
     * @param forceFullLogRecovery when true, segments recover by scanning the full log
     */
    public LogPartition(String storagePath, String topic, int partitionId, long segmentSize, int indexInterval, boolean forceFullLogRecovery) {
        this.topic = topic;
        this.partitionId = partitionId;
        this.partitionPath = storagePath + File.separator + topic + "-" + partitionId;
        this.segmentSize = segmentSize;
        this.indexInterval = indexInterval;
        this.forceFullLogRecovery = forceFullLogRecovery;
        File dir = new File(this.partitionPath);
        if (!dir.exists() && !dir.mkdirs()) {
            log.error("Failed to create partition directory: {}", this.partitionPath);
        }
    }

    /**
     * Wires the storage service used for recovery-status notifications and
     * immediately marks this partition as "recovering" so writes stay gated
     * until recovery finishes.
     */
    public synchronized void setStorageService(PartitionedStorageService storageService) {
        this.storageService = storageService;
        if (storageService != null) {
            // Immediately notify that recovery is not yet complete.
            // The recovery process will notify again when it's done.
            storageService.setPartitionRecoveryStatus(new TopicPartition(topic, partitionId), true);
            log.info("Partition {}-{} has set PartitionedStorageService reference.", topic, partitionId);
        }
    }

    /** Loads existing segments from disk and recovers {@link #nextOffset}. */
    public synchronized void initializeAndRecover() throws IOException {
        loadSegments();
    }

    /**
     * Scans the partition directory for {@code *.log} files, opens each one as a
     * {@link LogSegment}, selects the highest-base-offset segment as active, and
     * sets {@link #nextOffset} to one past the highest recovered logical offset.
     *
     * <p>Fix vs. previous version: file names are parsed only inside the per-file
     * try/catch. The old code also parsed them inside an {@code Arrays.sort}
     * comparator, so one malformed file name aborted the whole recovery with an
     * uncaught {@link NumberFormatException}. The sort itself was redundant:
     * {@link #segments} is an ordered map keyed by base offset, so neither
     * {@code lastEntry()} nor the running max depends on load order.
     */
    private void loadSegments() throws IOException {
        isRecovering = true;
        notifyRecoveryStatus(true);

        File dir = new File(partitionPath);
        File[] logFiles = dir.listFiles((d, name) -> name.endsWith(".log"));

        if (logFiles == null || logFiles.length == 0) {
            // Fresh partition: start with an empty segment at offset 0.
            activeSegment = new LogSegment(partitionPath, 0, segmentSize, indexInterval, this.forceFullLogRecovery);
            segments.put(0L, activeSegment);
            this.nextOffset = 0;
        } else {
            long globalMaxOffset = -1L;
            log.info("Starting to load segment files for partition {}-{}, found {} files", topic, partitionId, logFiles.length);

            for (File logFile : logFiles) {
                try {
                    long baseOffset = Long.parseLong(logFile.getName().replace(".log", ""));
                    log.info("Loading segment file: {} (baseOffset: {})", logFile.getName(), baseOffset);
                    LogSegment segment = new LogSegment(partitionPath, baseOffset, segmentSize, indexInterval, this.forceFullLogRecovery);
                    segments.put(baseOffset, segment);

                    if (segment.getMaxLogicalOffset().get() > globalMaxOffset) {
                        globalMaxOffset = segment.getMaxLogicalOffset().get();
                    }
                } catch (NumberFormatException e) {
                    log.error("Could not parse segment file name: {}. Skipping this file.", logFile.getName(), e);
                } catch (IOException e) {
                    log.error("IO exception while loading segment file {}. Skipping this file.", logFile.getName(), e);
                }
            }

            if (segments.isEmpty()) {
                // Every file was skipped as unreadable; fall back to a fresh segment at offset 0.
                activeSegment = new LogSegment(partitionPath, 0, segmentSize, indexInterval, this.forceFullLogRecovery);
                segments.put(0L, activeSegment);
            } else {
                activeSegment = segments.lastEntry().getValue();
            }
            // Set nextOffset in one place for all recovery paths of this branch.
            this.nextOffset = globalMaxOffset + 1;
        }

        log.info("Partition {}-{} loaded {} segments. Active segment starts at offset {}.",
                topic, partitionId, segments.size(), activeSegment.getBaseOffset());
        log.info("Partition {}-{} recovery complete. Active segment: {}, next offset recovered to: {}",
                topic, partitionId, activeSegment.getBaseOffset(), this.nextOffset);

        isRecovering = false;
        log.info("Partition {}-{} recovery complete, write enabled", topic, partitionId);

        notifyRecoveryStatus(false);
    }

    /** Notifies the storage service (if wired) of this partition's recovery state. */
    private void notifyRecoveryStatus(boolean recovering) {
        if (storageService != null) {
            storageService.setPartitionRecoveryStatus(new TopicPartition(topic, partitionId), recovering);
        }
    }

    /**
     * Reads the message at the given logical offset, or {@code null} when no
     * segment could contain it.
     */
    public synchronized Message read(long offset) throws IOException {
        Map.Entry<Long, LogSegment> entry = segments.floorEntry(offset);
        if (entry == null) {
            log.warn("No segment found for offset {} in partition {}-{}", offset, topic, partitionId);
            return null;
        }
        LogSegment segment = entry.getValue();
        return segment.read(offset);
    }

    /**
     * Zero-copy read of a single message, or {@code null} when no segment could
     * contain the offset.
     */
    public synchronized ByteBuffer readZeroCopy(long offset) throws IOException {
        Map.Entry<Long, LogSegment> entry = segments.floorEntry(offset);
        if (entry == null) {
            return null;
        }
        return entry.getValue().readZeroCopy(offset);
    }

    /**
     * Zero-copy batch read starting at {@code startOffset}, limited to
     * {@code maxMessages}. Returns an empty list when no segment could contain
     * the start offset.
     */
    public synchronized List<ByteBuffer> readBatchZeroCopy(long startOffset, int maxMessages) throws IOException {
        Map.Entry<Long, LogSegment> entry = segments.floorEntry(startOffset);
        if (entry == null) {
            log.warn("No segment found for startOffset {} in partition {}-{}", startOffset, topic, partitionId);
            return new ArrayList<>();
        }
        LogSegment segment = entry.getValue();
        return segment.readBatchZeroCopy(startOffset, maxMessages);
    }

    /**
     * Atomically appends a message, rolling to a new segment when needed.
     * This is the single source of truth for offset generation.
     *
     * <p>Fix vs. previous version: if a message cannot fit even into a freshly
     * rolled empty segment (i.e. it exceeds {@code segmentSize}), the old code
     * looped forever re-rolling at the same offset. We now fail fast with an
     * {@link IOException} after one unsuccessful roll.
     *
     * @param message the message to append
     * @return the logical offset assigned to the message, or {@code -1} while recovering
     * @throws IOException if an unrecoverable I/O error occurs or the message
     *                     cannot fit into an empty segment
     */
    public synchronized long append(Message message) throws IOException {
        if (isRecovering) {
            log.warn("Partition {}-{} is recovering, skipping single message append.", topic, partitionId);
            return -1L;
        }

        // The offset is fixed up front; rolling retries the same offset against the new segment.
        long offsetToTry = this.nextOffset;
        boolean rolled = false;
        while (true) {
            try {
                activeSegment.append(message, offsetToTry);

                // Success: officially consume the offset.
                this.nextOffset++;
                return offsetToTry;

            } catch (SegmentFullException e) {
                if (rolled) {
                    // A brand-new segment also rejected the message, so it can never fit.
                    throw new IOException("Message at offset " + offsetToTry
                            + " does not fit into an empty segment (segmentSize=" + segmentSize
                            + ") for partition " + topic + "-" + partitionId, e);
                }
                // The tried offset becomes the base offset of the new segment.
                log.info("Segment is full. Rolling to new segment for partition {}-{} starting at offset {}.", topic, partitionId, offsetToTry);
                rollToNewSegment(offsetToTry);
                rolled = true;
            }
        }
    }

    /**
     * @deprecated does not handle segment rolls correctly in a concurrent
     * environment; use {@link #append(Message)}, which generates the offset itself.
     */
    @Deprecated
    public synchronized void append(Message message, long logicalOffset) throws IOException {
        if (isRecovering) {
            log.warn("Partition {}-{} is recovering, skipping single message append.", topic, partitionId);
            return;
        }
        activeSegment.append(message, logicalOffset);
        // Never move nextOffset backwards if the caller supplied an old offset.
        this.nextOffset = Math.max(this.nextOffset, logicalOffset + 1);
    }


    /**
     * Appends a batch of messages starting at {@code startOffset}, rolling to new
     * segments as required until the whole batch is written.
     *
     * <p>Fix vs. previous version: if a message could not be written even into a
     * freshly rolled empty segment, the old loop spun forever. We now throw an
     * {@link IOException} when a roll produces no progress.
     *
     * @param messages        messages to append (no-op when null or empty)
     * @param startOffset     logical offset of the first message in the batch
     * @param commitLogOffset commit-log position forwarded to the segment
     * @throws IOException if an unrecoverable I/O error occurs or no progress can be made
     */
    public synchronized void appendBatch(List<Message> messages, long startOffset, long commitLogOffset) throws IOException {
        if (messages == null || messages.isEmpty()) {
            return;
        }

        if (isRecovering) {
            log.warn("Partition {}-{} is recovering, skipping batch append.", topic, partitionId);
            return;
        }

        int messagesWritten = 0;
        boolean rolledWithoutProgress = false;
        while (messagesWritten < messages.size()) {
            final List<Message> remainingMessages = messages.subList(messagesWritten, messages.size());
            final long currentStartOffset = startOffset + messagesWritten;

            int writtenInThisAttempt = 0;
            try {
                writtenInThisAttempt = activeSegment.appendBatch(remainingMessages, currentStartOffset, commitLogOffset, this.partitionId);
            } catch (SegmentFullException e) {
                log.info("Segment for partition {}-{} is completely full, will roll.", topic, partitionId);
                writtenInThisAttempt = 0;
            }

            if (writtenInThisAttempt > 0) {
                messagesWritten += writtenInThisAttempt;
                this.nextOffset = Math.max(this.nextOffset, startOffset + messagesWritten);
                rolledWithoutProgress = false;
            }

            if (writtenInThisAttempt == 0 && messagesWritten < messages.size()) {
                if (rolledWithoutProgress) {
                    // A fresh segment accepted nothing — the next message can never fit.
                    throw new IOException("Batch append stalled at offset " + (startOffset + messagesWritten)
                            + " for partition " + topic + "-" + partitionId
                            + ": message does not fit into an empty segment (segmentSize=" + segmentSize + ")");
                }
                log.info("Force rolling to new segment for partition {}-{} as current segment is full.", topic, partitionId);
                // For batch append, the next available offset becomes the new segment base.
                rollToNewSegment(this.nextOffset);
                rolledWithoutProgress = true;
            }
        }
    }

    /**
     * Flushes the current active segment and replaces it with a fresh segment
     * whose base offset is {@code newBaseOffset}. Callers hold the instance lock.
     */
    private void rollToNewSegment(long newBaseOffset) throws IOException {
        if (isRecovering) {
            log.warn("Partition {}-{} is recovering, skipping segment roll.", topic, partitionId);
            return;
        }

        log.info("Rolling to new segment for partition {}-{} at offset {}", topic, partitionId, newBaseOffset);
        // Flush the old active segment to ensure all its data is on disk.
        activeSegment.flush();

        // Create the new segment with the correct base offset.
        activeSegment = new LogSegment(partitionPath, newBaseOffset, segmentSize, indexInterval, this.forceFullLogRecovery);
        segments.put(newBaseOffset, activeSegment);
    }

    /** @return the next logical offset that will be assigned by {@link #append(Message)} */
    public synchronized long getNextOffset() {
        return this.nextOffset;
    }

    /**
     * Closes all segments, logging (rather than propagating) per-segment close
     * failures so every segment gets a close attempt.
     */
    @Override
    public synchronized void close() throws IOException {
        for (LogSegment segment : segments.values()) {
            try {
                segment.close();
            } catch (IOException e) {
                log.error("Error closing segment {} for partition {}-{}", segment.getBaseOffset(), topic, partitionId, e);
            }
        }
        segments.clear();
        log.info("Partition {}-{} closed.", topic, partitionId);
    }

    /** Repairs the index of every segment whose index is detected as corrupted. */
    public synchronized void repairCorruptedSegments() throws IOException {
        log.info("Starting to check for corrupted segments in partition {}-{}", topic, partitionId);
        for (LogSegment segment : segments.values()) {
            if (segment.isIndexCorrupted()) {
                log.warn("Found corrupted index for segment {}, starting repair", segment.getBaseOffset());
                segment.repairIndex();
            }
        }
        log.info("Segment repair check completed for partition {}-{}", topic, partitionId);
    }

    /**
     * Validates that each segment's base offset equals the previous segment's
     * base offset plus its entry count, dropping mis-offset segments and
     * recreating the active segment at the expected offset when needed.
     *
     * <p>Fix vs. previous version: {@code lowerEntry} returns {@code null} when
     * the oldest remaining segment starts above 0 (e.g. after earlier segments
     * were removed); that case used to throw a {@link NullPointerException} and
     * is now skipped.
     *
     * <p>NOTE(review): a mis-offset non-active segment is closed and removed
     * without migrating its data — presumably intended for corrupt segments,
     * but worth confirming with the storage-recovery design.
     */
    public synchronized void manualRepairSegments() throws IOException {
        log.info("Starting manual repair of segment structure for partition {}-{}", topic, partitionId);
        NavigableMap<Long, LogSegment> segmentsCopy = new ConcurrentSkipListMap<>(segments);
        for (Map.Entry<Long, LogSegment> entry : segmentsCopy.entrySet()) {
            Long baseOffset = entry.getKey();
            LogSegment segment = entry.getValue();

            if (baseOffset <= 0) {
                continue;
            }
            Map.Entry<Long, LogSegment> previousEntry = segmentsCopy.lowerEntry(baseOffset);
            if (previousEntry == null) {
                // Oldest remaining segment: nothing before it to validate against.
                continue;
            }
            LogSegment previousSegment = previousEntry.getValue();
            long expectedBaseOffset = previousSegment.getBaseOffset() + previousSegment.getEntryCount().get();

            if (baseOffset != expectedBaseOffset) {
                log.warn("Found incorrect segment baseOffset: {} (expected: {}), will attempt to repair", baseOffset, expectedBaseOffset);
                segment.close();
                segments.remove(baseOffset);
                if (segment == activeSegment) {
                    activeSegment = new LogSegment(partitionPath, expectedBaseOffset, segmentSize, indexInterval, this.forceFullLogRecovery);
                    segments.put(expectedBaseOffset, activeSegment);
                    log.info("Recreated active segment with baseOffset: {}", expectedBaseOffset);
                }
            }
        }
        log.info("Segment structure repair completed for partition {}-{}", topic, partitionId);
    }

    /** @return base offsets of all segments whose index is detected as corrupted */
    public synchronized List<Long> getCorruptedSegments() {
        List<Long> corruptedSegments = new ArrayList<>();
        for (LogSegment segment : segments.values()) {
            if (segment.isIndexCorrupted()) {
                corruptedSegments.add(segment.getBaseOffset());
            }
        }
        return corruptedSegments;
    }
}