package org.tao.lightningmq.broker.core;

import io.netty.util.internal.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.tao.lightningmq.broker.cache.CommonCache;
import org.tao.lightningmq.broker.model.*;
import org.tao.lightningmq.broker.utils.AckMessageLock;
import org.tao.lightningmq.broker.utils.UnfairReentrantLock;
import org.tao.lightningmq.common.constants.BrokerConstants;
import org.tao.lightningmq.common.dto.ConsumeMsgCommitLogDTO;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * @Author lht
 * @date 2025/1/4 - 21:45
 * @description:
 */
public class ConsumeQueueConsumeHandler {

    public AckMessageLock ackMessageLock = new UnfairReentrantLock();
    private static final Logger LOGGER = LoggerFactory.getLogger(ConsumeQueueConsumeHandler.class);


    /**
     * Reads up to {@code batchSize} of the latest unconsumed consumeQueue entries for the
     * requested topic/consumeGroup/queue and resolves each entry to its raw commitLog payload.
     *
     * @param reqModel consume request (topic, consume group, queue id, batch size)
     * @return resolved commitLog messages in queue order, or {@code null} when there is nothing
     *         new to consume (kept as {@code null} rather than an empty list so existing callers
     *         that check for {@code null} keep working)
     * @throws RuntimeException when the topic is unknown or no offset bookkeeping exists for it
     */
    public List<ConsumeMsgCommitLogDTO> consume(ConsumeQueueConsumeReqModel reqModel) {
        String topic = reqModel.getTopic();
        MqTopicModel mqTopicModel = CommonCache.getMqTopicModelMap().get(topic);
        if (mqTopicModel == null) {
            throw new RuntimeException("topic " + topic + " not exist!");
        }
        String consumeGroup = reqModel.getConsumeGroup();
        Integer batchSize = reqModel.getBatchSize();
        Integer queueId = reqModel.getQueueId();

        ConsumeQueueOffsetModel.OffsetTable offsetTable = CommonCache.getConsumeQueueOffsetModel().getOffsetTable();
        ConsumeQueueOffsetModel.ConsumerGroupDetail consumerGroupDetail = offsetTable.getTopicConsumerGroupDetail().get(topic);
        // Fail with a diagnosable message instead of a bare NPE when offset bookkeeping is missing.
        if (consumerGroupDetail == null) {
            throw new RuntimeException("no consumer group offset detail for topic " + topic);
        }
        Map<String, Map<String, String>> consumeGroupOffsetMap = consumerGroupDetail.getConsumerGroupDetailMap();
        Map<String, String> queueOffsetDetailMap = consumeGroupOffsetMap.get(consumeGroup);
        if (queueOffsetDetailMap == null) {
            throw new RuntimeException("no queue offset detail for consume group " + consumeGroup + ", topic " + topic);
        }
        List<QueueModel> queueList = mqTopicModel.getQueueList();

        // Offset entry format is "<commitLogFileName>#<consumeQueueOffset>".
        String offsetStr = queueOffsetDetailMap.get(String.valueOf(queueId));
        String[] offsetStrArr = offsetStr.split("#");
        int startOffset = Integer.parseInt(offsetStrArr[1]);
        QueueModel queueModel = queueList.get(queueId);
        if (queueModel.getLatestOffset().get() <= startOffset) {
            // Nothing new in this queue yet.
            return null;
        }
        // Clamp batchSize so the read never runs past the queue's latest written offset.
        if (queueModel.getLatestOffset().get() <= startOffset + batchSize * BrokerConstants.CONSUME_QUEUE_EACH_MSG_SIZE) {
            batchSize = (queueModel.getLatestOffset().get() - startOffset) / BrokerConstants.CONSUME_QUEUE_EACH_MSG_SIZE;
        }

        List<ConsumeQueueMMapFileModel> queueMMapFileModels = CommonCache.getConsumeQueueMMapFileModelManager().get(topic);
        ConsumeQueueMMapFileModel consumeQueueMMapFileModel = queueMMapFileModels.get(queueId);
        List<byte[]> bytes = consumeQueueMMapFileModel.readContentBatch(startOffset, batchSize);
        CommitLogMMapFileModel commitLogMMapFileModel = CommonCache.getCommitLogMMapFileModelManager().get(topic);
        // Each entry's consumeQueue offset is derivable from its index, so no shared mutable
        // counter is needed (the original mutated an int[] inside a stream map, which is
        // order-dependent and fragile).
        List<ConsumeMsgCommitLogDTO> result = new ArrayList<>(bytes.size());
        for (int i = 0; i < bytes.size(); i++) {
            int msgConsumeQueueOffset = startOffset + i * BrokerConstants.CONSUME_QUEUE_EACH_MSG_SIZE;
            result.add(resolveCommitLogMsg(bytes.get(i), commitLogMMapFileModel, msgConsumeQueueOffset));
        }
        return result;
    }

    /**
     * Resolves a single consumeQueue entry to the commitLog message it points at.
     * If the entry points into the currently mapped commitLog, it is read through the existing
     * mapping; otherwise the (older) commitLog file is mapped read-only from disk just for this
     * read.
     *
     * @param content                raw consumeQueue entry bytes
     * @param commitLogMMapFileModel currently mapped commitLog for the topic
     * @param msgConsumeQueueOffset  consumeQueue offset of this entry, stamped onto the DTO
     * @return the resolved commitLog message DTO
     */
    private ConsumeMsgCommitLogDTO resolveCommitLogMsg(byte[] content,
                                                       CommitLogMMapFileModel commitLogMMapFileModel,
                                                       int msgConsumeQueueOffset) {
        ConsumeQueueDetailModel consumeQueueDetailModel = new ConsumeQueueDetailModel();
        consumeQueueDetailModel.buildFromBytes(content);
        if (Objects.equals(consumeQueueDetailModel.getCommitLogFilenameStr(), commitLogMMapFileModel.getFile().getName())) {
            LOGGER.info("消息在当前commitLog中，直接读取");
            ConsumeMsgCommitLogDTO consumeMsgCommitLogDTO = commitLogMMapFileModel
                    .readContent(consumeQueueDetailModel.getMsgIndex(), consumeQueueDetailModel.getMsgLen());
            consumeMsgCommitLogDTO.setRetryTimes(consumeQueueDetailModel.getRetryTimes());
            consumeMsgCommitLogDTO.setMsgConsumeQueueOffset(msgConsumeQueueOffset);
            return consumeMsgCommitLogDTO;
        }
        LOGGER.info("消息不在当前commitLog中，从磁盘中读取");
        String commitLogFilenameStr = consumeQueueDetailModel.getCommitLogFilenameStr();
        File file = new File(commitLogMMapFileModel.getFile().getParent() + "/" + commitLogFilenameStr);
        try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r")) {
            // NOTE(review): the MappedByteBuffer created here is only released when GC collects
            // it (no explicit unmap API pre-JDK mainline); acceptable for occasional old-file
            // reads, but worth confirming this path is not hot.
            MappedByteBuffer map = randomAccessFile.getChannel().map(FileChannel.MapMode.READ_ONLY,
                    consumeQueueDetailModel.getMsgIndex(), consumeQueueDetailModel.getMsgLen());
            byte[] dest = new byte[consumeQueueDetailModel.getMsgLen()];
            map.get(dest);
            ConsumeMsgCommitLogDTO consumeMsgCommitLogDTO = new ConsumeMsgCommitLogDTO();
            consumeMsgCommitLogDTO.setBody(dest);
            consumeMsgCommitLogDTO.setFileName(file.getName());
            consumeMsgCommitLogDTO.setCommitLogOffset(consumeQueueDetailModel.getMsgIndex());
            consumeMsgCommitLogDTO.setCommitLogSize(consumeQueueDetailModel.getMsgLen());
            consumeMsgCommitLogDTO.setRetryTimes(consumeQueueDetailModel.getRetryTimes());
            consumeMsgCommitLogDTO.setMsgConsumeQueueOffset(msgConsumeQueueOffset);
            return consumeMsgCommitLogDTO;
        } catch (IOException e) {
            throw new RuntimeException("failed to read commitLog file " + file.getName(), e);
        }
    }

    /**
     * Acknowledges a consumed batch: advances the queue's consume offset to the end of the
     * acked range, unless the stored offset is already at or beyond it (stale/duplicate ack).
     *
     * @param ackModel ack request (topic, consume group, queue id, batch start offset and size)
     * @return {@code true} when the ack was applied or was a harmless duplicate; {@code false}
     *         on any failure (the error is logged, not rethrown)
     */
    public boolean ack(ConsumeQueueConsumeAckModel ackModel) {
        // NOTE(review): this read-modify-write of the offset map is not guarded by
        // ackMessageLock (declared above but never taken here) — concurrent acks for the same
        // queue could lose an update. Confirm whether callers serialize acks externally.
        try {
            String topic = ackModel.getTopic();
            String consumeGroup = ackModel.getConsumeGroup();
            Integer queueId = ackModel.getQueueId();
            ConsumeQueueOffsetModel.OffsetTable offsetTable = CommonCache.getConsumeQueueOffsetModel().getOffsetTable();
            Map<String, ConsumeQueueOffsetModel.ConsumerGroupDetail> consumerGroupDetailMap = offsetTable.getTopicConsumerGroupDetail();
            ConsumeQueueOffsetModel.ConsumerGroupDetail consumerGroupDetail = consumerGroupDetailMap.get(topic);
            Map<String, String> consumeQueueOffsetDetailMap = consumerGroupDetail.getConsumerGroupDetailMap().get(consumeGroup);
            // Offset entry format is "<commitLogFileName>#<consumeQueueOffset>".
            String offsetStr = consumeQueueOffsetDetailMap.get(String.valueOf(queueId));
            String[] offsetStrArr = offsetStr.split("#");
            int currentOffset = Integer.parseInt(offsetStrArr[1]);
            int ackEndOffset = ackModel.getStartMsgConsumeQueueOffset() + ackModel.getBatchSize() * BrokerConstants.CONSUME_QUEUE_EACH_MSG_SIZE;
            // Never move the offset backwards; a stale or duplicate ack is treated as success.
            if (ackEndOffset <= currentOffset) {
                return true;
            }
            consumeQueueOffsetDetailMap.put(String.valueOf(queueId), offsetStrArr[0] + "#" + ackEndOffset);
            return true;
        } catch (Exception e) {
            // Pass the Throwable as the trailing argument (no placeholder) so SLF4J logs the
            // full stack trace instead of just e.toString().
            LOGGER.warn("ack failed, req: {}", ackModel, e);
            return false;
        }
    }

}
