package org.tao.lightningmq.broker.event.spi.listenr;

import com.alibaba.fastjson2.JSON;
import io.netty.channel.ChannelHandlerContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.tao.lightningmq.broker.cache.CommonCache;
import org.tao.lightningmq.broker.enums.BrokerClusterModeEnum;
import org.tao.lightningmq.broker.event.model.ConsumeMsgAckEvent;
import org.tao.lightningmq.broker.model.ConsumeQueueConsumeAckModel;
import org.tao.lightningmq.broker.model.MqTopicModel;
import org.tao.lightningmq.broker.rebalance.ConsumerInstance;
import org.tao.lightningmq.common.dto.ConsumeMsgAckReqDTO;
import org.tao.lightningmq.common.dto.ConsumeMsgAckRespDTO;
import org.tao.lightningmq.common.dto.SlaveSyncRespDTO;
import org.tao.lightningmq.common.enums.AckStatus;
import org.tao.lightningmq.common.enums.BrokerEventCode;
import org.tao.lightningmq.common.enums.BrokerResponseCode;
import org.tao.lightningmq.common.event.Listener;
import org.tao.lightningmq.common.tcp.TcpMsg;

import java.util.List;
import java.util.Map;

/**
 * @Author lht
 * @date 2025/1/20 - 14:54
 * @description: Listener for consumer ACK events (消费者ACK事件监听器)
 */
public class ConsumeMsgAckListener implements Listener<ConsumeMsgAckEvent> {

    private static final Logger LOGGER = LoggerFactory.getLogger(ConsumeMsgAckListener.class);

    @Override
    public void onReceive(ConsumeMsgAckEvent event) throws Exception {
        ConsumeMsgAckReqDTO consumeMsgAckReqDTO = event.getConsumeMsgAckReqDTO();
        String topic = consumeMsgAckReqDTO.getTopic();
        String consumeGroup = consumeMsgAckReqDTO.getConsumeGroup();
        Integer queueId = consumeMsgAckReqDTO.getQueueId();
        Integer ackCount = consumeMsgAckReqDTO.getAckCount();
        Integer startMsgConsumeQueueOffset = consumeMsgAckReqDTO.getStartMsgConsumeQueueOffset();
        ConsumeMsgAckRespDTO consumeMsgAckRespDTO = new ConsumeMsgAckRespDTO();
        consumeMsgAckRespDTO.setMsgId(event.getMsgId());

        MqTopicModel mqTopicModel = CommonCache.getMqTopicModelMap().get(topic);
        // 如果topic不存在，则ack失败
        if (mqTopicModel == null) {
            consumeMsgAckRespDTO.setAckStatus(AckStatus.FAIL.getCode());
            event.getContext().writeAndFlush(new TcpMsg(BrokerResponseCode.BROKER_UPDATE_CONSUME_OFFSET_RESP.getCode(),
                    JSON.toJSONBytes(consumeMsgAckRespDTO)));
            LOGGER.warn("topic不存在，ack失败");
            return;
        }

        // 如果消费者组map为空，则ack失败
        Map<String, List<ConsumerInstance>> consumerInstanceMap = CommonCache.getConsumeHoldMap().get(topic);
        if (consumerInstanceMap == null || consumerInstanceMap.isEmpty()) {
            consumeMsgAckRespDTO.setAckStatus(AckStatus.FAIL.getCode());
            event.getContext().writeAndFlush(new TcpMsg(BrokerResponseCode.BROKER_UPDATE_CONSUME_OFFSET_RESP.getCode(),
                    JSON.toJSONBytes(consumeMsgAckRespDTO)));
            LOGGER.warn("消费者组map为空，ack失败");
            return;
        }

        // 当前消费者所属的消费者组为空，则ack失败
        List<ConsumerInstance> consumerInstances = consumerInstanceMap.get(consumeGroup);
        if (consumerInstances == null || consumerInstances.isEmpty()) {
            consumeMsgAckRespDTO.setAckStatus(AckStatus.FAIL.getCode());
            event.getContext().writeAndFlush(new TcpMsg(BrokerResponseCode.BROKER_UPDATE_CONSUME_OFFSET_RESP.getCode(),
                    JSON.toJSONBytes(consumeMsgAckRespDTO)));
            LOGGER.warn("当前消费者所属的消费者组为空，ack失败");
            return;
        }

        // 在所属的消费者组中查找当前消费者，如果不存在，则ack失败
        String currentConsumeReqId = consumeMsgAckReqDTO.getIp() + ":" + consumeMsgAckReqDTO.getPort();
        ConsumerInstance matchInstance = consumerInstances.stream().filter(item -> item.getConsumerReqId().equals(currentConsumeReqId)).findAny().orElse(null);
        if (matchInstance == null) {
            consumeMsgAckRespDTO.setAckStatus(AckStatus.FAIL.getCode());
            event.getContext().writeAndFlush(new TcpMsg(BrokerResponseCode.BROKER_UPDATE_CONSUME_OFFSET_RESP.getCode(),
                    JSON.toJSONBytes(consumeMsgAckRespDTO)));
            return;
        }

        ConsumeQueueConsumeAckModel consumeQueueConsumeAckModel =
                new ConsumeQueueConsumeAckModel(topic, consumeGroup, queueId, ackCount, startMsgConsumeQueueOffset);
        if (CommonCache.getConsumeQueueConsumeHandler().ack(consumeQueueConsumeAckModel)) {
            LOGGER.info("broker receive offset value ,topic is {},consumeGroup is {},queueId is {},ackCount is {}",
                    topic, consumeGroup, queueId, ackCount);
            consumeMsgAckRespDTO.setAckStatus(AckStatus.SUCCESS.getCode());
            TcpMsg tcpMsg = new TcpMsg(BrokerResponseCode.BROKER_UPDATE_CONSUME_OFFSET_RESP.getCode(),
                    JSON.toJSONBytes(consumeMsgAckRespDTO));
            event.getContext().writeAndFlush(tcpMsg);
            // ack成功后如果是主节点的话需要进行消费进度同步
            if(BrokerClusterModeEnum.MASTER_SLAVE.getCode().equals(CommonCache.getGlobalProperties().getBrokerClusterMode())) {
                if ("master".equals(CommonCache.getGlobalProperties().getBrokerClusterRole())) {
                    //主节点，需要将消费进度同步给到从节点
                    for (ChannelHandlerContext slaveChannel : CommonCache.getSlaveChannelMap().values()) {
                        LOGGER.info("主节点将消费进度同步给到从节点: {}", slaveChannel.channel().remoteAddress());
                        slaveChannel.writeAndFlush(new TcpMsg(BrokerEventCode.CONSUME_SUCCESS_MSG.getCode(), JSON.toJSONBytes(event.getConsumeMsgAckReqDTO())));
                    }
                }
            }

        } else {
            consumeMsgAckRespDTO.setAckStatus(AckStatus.FAIL.getCode());
            event.getContext().writeAndFlush(new TcpMsg(BrokerResponseCode.BROKER_UPDATE_CONSUME_OFFSET_RESP.getCode(),
                    JSON.toJSONBytes(consumeMsgAckRespDTO)));
        }

    }
}
