package cn.iocoder.yudao.module.engine.service.core.queue.audio;

import cn.iocoder.yudao.framework.cache.NettyChannelCacheService;
import cn.iocoder.yudao.framework.common.util.object.BeanUtils;
import cn.iocoder.yudao.module.engine.constant.EngineConstant;
import cn.iocoder.yudao.module.engine.enums.EnumAudioType;
import cn.iocoder.yudao.module.engine.model.context.AIMessageContext;
import cn.iocoder.yudao.module.engine.model.context.ControlEngineContext;
import cn.iocoder.yudao.module.engine.model.context.EngineSessionContext;
import cn.iocoder.yudao.module.engine.model.engine.EngineOutputData;
import cn.iocoder.yudao.module.engine.model.websoket.EngineOutputDataVO;
import cn.iocoder.yudao.module.engine.service.core.engine.IEngineConfigService;
import cn.iocoder.yudao.module.engine.service.core.messagestore.MessageStoreManager;
import cn.iocoder.yudao.module.engine.service.log.IEngineLogService;
import cn.iocoder.yudao.module.resource.api.enums.MessageSourceTypeEnum;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

import javax.annotation.Nullable;

@Slf4j
@Service
public class AudioQueueManager {
    @Resource
    private IEngineConfigService engineConfigService;
    @Resource
    private IEngineLogService logService;
    @Resource
    private NettyChannelCacheService channelCacheService;
    @Resource
    private MessageStoreManager messageStoreManager;

    /**
     * Discards all audio still pending in the session's queue.
     *
     * @param context engine context whose session audio queue is emptied
     */
    public void clear(ControlEngineContext context) {
        context.getSessionCtx().getAudioQueue().clear();
    }

    /**
     * Enqueues one audio item, then drains the whole queue to the client
     * WITHOUT persisting the sent messages.
     *
     * @param sessionContext session owning the queue and websocket channel
     * @param audio          audio item to enqueue
     * @return {@code true} if the item was accepted into the queue;
     *         {@code false} if the user is offline or the queue rejected it
     */
    public boolean addAndSendAudioNotSave(EngineSessionContext sessionContext,
                                          EngineOutputData.Audio audio) {
        final boolean added = addAudioToQueue(sessionContext, audio);
        drainQueueNotSave(sessionContext);
        return added;
    }

    /**
     * Sends queued audio to the user until the queue is empty or sending stops
     * (user offline). Iterative on purpose: the original recursive form could
     * overflow the stack on a large queue.
     */
    private void drainQueueNotSave(EngineSessionContext sessionContext) {
        while (sendToUser(sessionContext) != null) {
            // keep draining until sendToUser reports nothing more to send
        }
    }

    /**
     * Enqueues one audio item, then drains the whole queue to the client,
     * persisting each successfully sent item via {@link MessageStoreManager}.
     *
     * @param engineContext engine context (provides the session context)
     * @param messageCtx    optional AI message context forwarded to the store
     * @param audio         audio item to enqueue
     * @return {@code true} if the item was accepted into the queue;
     *         {@code false} if the user is offline or the queue rejected it
     */
    public boolean addAndSendAudio(ControlEngineContext engineContext,
                                   @Nullable AIMessageContext messageCtx,
                                   EngineOutputData.Audio audio) {
        final boolean added = addAudioToQueue(engineContext.getSessionCtx(), audio);
        drainQueueAndSave(engineContext, messageCtx);
        return added;
    }

    /**
     * Sends queued audio to the user until the queue is empty or sending stops,
     * saving each sent item. Iterative (was recursive) to avoid
     * {@code StackOverflowError} when many items are queued.
     */
    private void drainQueueAndSave(ControlEngineContext engineContext, @Nullable AIMessageContext messageCtx) {
        final EngineSessionContext sessionContext = engineContext.getSessionCtx();
        EngineOutputData.Audio audio;
        while ((audio = sendToUser(sessionContext)) != null) {
            messageStoreManager.saveMessages(engineContext, messageCtx, audio);
        }
    }

    /**
     * Adds an audio item to the session queue, refusing when the user's
     * websocket channel is no longer online.
     *
     * @return {@code true} if the queue accepted the item
     */
    private boolean addAudioToQueue(EngineSessionContext sessionContext, EngineOutputData.Audio audio) {
        final String sessionId = sessionContext.getWsSessionId();
        if (!channelCacheService.isOnline(sessionId)) {
            log.warn("用户已离线, userId = {}, sessionId = {}", sessionContext.getUserId(), sessionId);
            return false;
        }

        return sessionContext.getAudioQueue().addAudio(audio);
    }

    /**
     * Pops the next audio item off the session queue and pushes it to the
     * client over the websocket channel.
     *
     * @return the item that was sent, or {@code null} when the queue is empty
     *         or the user went offline (either way the caller should stop draining)
     */
    @Nullable
    private EngineOutputData.Audio sendToUser(EngineSessionContext sessionContext) {
        final String sessionId = sessionContext.getWsSessionId();
        final EngineOutputData.Audio audio = sessionContext.getAudioQueue().removeAudio();
        if (audio == null) {
            log.info("没有音频需要发送, userId = {}, sessionId = {}", sessionContext.getUserId(), sessionId);
            return null;
        }

        final EngineOutputData outputData = new EngineOutputData();
        final int playbackIntervalMs = engineConfigService.getPlaybackIntervalMs();
        outputData.setAuxiliaryData(new EngineOutputData.AuxiliaryData(playbackIntervalMs));
        outputData.setMainAudio(audio);

        // NOTE(review): the item was already dequeued above, so an offline user
        // silently loses this audio — confirm that dropping (vs. re-queueing) is intended.
        if (!channelCacheService.isOnline(sessionId)) {
            log.warn("用户已离线, userId = {}, sessionId = {}", sessionContext.getUserId(), sessionId);
            return null;
        }

        // Record the audio id as used for this session
        sessionContext.addUsedAudioId(audio.getAudioId());

        final EngineOutputDataVO outputDataVO = BeanUtils.toBean(outputData, EngineOutputDataVO.class);
        outputDataVO.getMainAudio().setPriority(outputData.getMainAudio().getAudioType().getPriority());
        log(sessionContext, sessionContext.getUserId(), sessionId, outputDataVO, audio);
        channelCacheService.sendMessage(sessionId, outputDataVO, EngineConstant.ENGINE_INPUT_MAPPING);
        return audio;
    }

    /**
     * Logs the outgoing payload and writes a human-readable engine-log line.
     * The label depends on the message source first, then the audio type.
     * Refactored to compute the label once instead of duplicating the
     * {@code logService.log(...)} call per branch; all label strings are unchanged.
     *
     * @throws IllegalArgumentException for an unrecognized {@link EnumAudioType}
     */
    private void log(EngineSessionContext sessionContext, Long userId, String sessionId,
                     EngineOutputDataVO outputDataVO, EngineOutputData.Audio audio) {
        log.info("给client发送音频, queueSize = {}, userId = {}, sessionId = {}, outputDataVO = {}",
                sessionContext.getAudioQueueSize(), userId, sessionId, outputDataVO);

        final String prefix;
        if (audio.getMessageSendType() == MessageSourceTypeEnum.AI) {
            prefix = "【输出AI音频】";
        } else if (audio.getMessageSendType() == MessageSourceTypeEnum.VECTOR) {
            prefix = "【输出水库音频】";
        } else {
            final EnumAudioType audioType = audio.getAudioType();
            switch (audioType) {
                case CHAT:
                    prefix = "【输出系统音频】";
                    break;
                case GUID:
                    prefix = "【输出语音引导音频】";
                    break;
                case BUY_GUID:
                    prefix = "【输出购买引导音频】";
                    break;
                case THREE_AXIS_HIGH:
                    prefix = "【输出高潮响应音频】";
                    break;
                case THREE_AXIS_RANGE:
                    prefix = "【输出坐标幅度音频】";
                    break;
                case BGM:
                case PREFIX:
                case SUFFIX:
                    prefix = "【输出叫床声音频】";
                    break;
                default:
                    throw new IllegalArgumentException("未知音频类型: " + audioType);
            }
        }
        logService.log(sessionContext, prefix + audio.getAudioDesc());
    }

}
