package com.bnd.controller;

import cn.hutool.core.collection.CollectionUtil;
import cn.hutool.core.util.StrUtil;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import com.alibaba.dashscope.audio.asr.recognition.Recognition;
import com.alibaba.dashscope.audio.asr.recognition.RecognitionParam;
import com.alibaba.dashscope.audio.asr.recognition.RecognitionResult;
import com.alibaba.dashscope.audio.tts.SpeechSynthesisResult;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisAudioFormat;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisParam;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesizer;
import com.alibaba.dashscope.common.Message;
import com.alibaba.dashscope.common.ResultCallback;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.bnd.common.ErrorCode;
import com.bnd.common.manager.GenericChatSessionManager;
import com.bnd.config.AiConfig;
import com.bnd.domain.session.WsChatSessionV2;
import com.bnd.domain.ws.WebSocketMessage;
import com.bnd.service.CharacterInfoService;
import com.bnd.service.ChatHistoryService;
import com.bnd.utils.MessageChatHistoryConverter;
import com.bnd.utils.ThrowUtils;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.reactivex.Flowable;
import io.reactivex.schedulers.Schedulers;
import jakarta.annotation.Resource;
import jakarta.websocket.*;
import jakarta.websocket.server.PathParam;
import jakarta.websocket.server.ServerEndpoint;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

@Slf4j
@ServerEndpoint(value = "/rolePlay/ws/v2/{characterId}")
@Component
public class RolePlayWsHandlerV2 {
    // A @ServerEndpoint class is instantiated per connection by the WebSocket container,
    // not by Spring, so dependencies injected into the single Spring-managed instance are
    // shared with container-created instances through static fields.
    private static GenericChatSessionManager<WsChatSessionV2> wsChatSessionV2Manager;
    private static ChatHistoryService chatHistoryService;
    private static CharacterInfoService characterInfoService;
    private static AiConfig aiConfig;

    @Resource
    private void setAiConfig(AiConfig aiConfig) {
        RolePlayWsHandlerV2.aiConfig = aiConfig;
    }

    @Resource
    private void setCharacterInfoService(CharacterInfoService characterInfoService) {
        RolePlayWsHandlerV2.characterInfoService = characterInfoService;
    }

    @Resource(name = "wsChatSessionV2Manager")
    private void setGenericChatSessionManager(GenericChatSessionManager<WsChatSessionV2> wsChatSessionV2Manager) {
        RolePlayWsHandlerV2.wsChatSessionV2Manager = wsChatSessionV2Manager;
    }

    @Resource
    private void setChatHistoryService(ChatHistoryService chatHistoryService) {
        RolePlayWsHandlerV2.chatHistoryService = chatHistoryService;
    }

    private static final ObjectMapper objectMapper = new ObjectMapper();
    // Active chat sessions keyed by characterId.
    // NOTE(review): keying by characterId (not by user/connection) means two clients
    // talking to the same character would share one session — confirm the endpoint is
    // intended to be single-user per character.
    private static final Map<Long, WsChatSessionV2> chatSessionMap = new ConcurrentHashMap<>();

    /**
     * Invoked when a WebSocket connection is established: stores the characterId on the
     * session and lazily builds the per-character chat session (TTS/ASR params + synthesizer).
     */
    @OnOpen
    public void onOpen(Session session, @PathParam("characterId") Long characterId) {
        // Remember characterId on the websocket session so later callbacks can find it
        session.getUserProperties().put("characterId", characterId);

        // computeIfAbsent makes the lookup-and-create atomic; the previous
        // containsKey/put sequence could build two sessions (and leak a synthesizer)
        // under concurrent opens for the same characterId.
        chatSessionMap.computeIfAbsent(characterId, id -> {
            WsChatSessionV2 wsChatSession = wsChatSessionV2Manager.getOrCreate("default", id, chatId -> new WsChatSessionV2(chatId, id, session));

            // Speech-synthesis (TTS) parameters
            wsChatSession.setSpeechSynthesisParam(getTtsParam(wsChatSession));

            // Speech-recognition (ASR) parameters
            wsChatSession.setRecognitionParam(getAsrParam());

            // Speech synthesizer bound to this session's callback
            wsChatSession.setSpeechSynthesizer(new SpeechSynthesizer(wsChatSession.getSpeechSynthesisParam(), getTtsCallback(wsChatSession)));

            return wsChatSession;
        });
        // NOTE(review): if a cached session already exists for this characterId, it keeps
        // its original (possibly closed) Session object — confirm reconnects are expected
        // to pass through handleEnd first.
        log.info("WebSocket连接已建立，characterId为: {}", characterId);
    }

    /**
     * Invoked when a text (JSON control) message is received; dispatches on the
     * message {@code type} field.
     *
     * @param message JSON-encoded {@link WebSocketMessage}
     * @param session the WebSocket session the message arrived on
     */
    @OnMessage
    public void onMessage(String message, Session session) {
        Long characterId = getCharacterId(session);
        // Guard: the property may be missing, or the session may already have been
        // removed by handleEnd — avoid the NPE the bare map.get() would cause.
        WsChatSessionV2 wsChatSession = characterId == null ? null : chatSessionMap.get(characterId);
        if (wsChatSession == null) {
            log.warn("未找到对应的会话，characterId: {}", characterId);
            sendErrorMessage(session);
            return;
        }
        try{
            WebSocketMessage wsMessage = objectMapper.readValue(message, WebSocketMessage.class);
            String type = wsMessage.getType();
            ThrowUtils.throwIf(StrUtil.isBlank(type), ErrorCode.SYSTEM_ERROR, "消息类型错误！");
            log.info("收到{}类型的消息：{}", type, message);
            switch (type) {
                case "START" -> {
                    log.info("开始对话：{}", characterId);
                    try {
                        if (session.isOpen()){
                            session.getBasicRemote().sendText("START");
                        }
                    } catch (IOException e) {
                        log.warn("WebSocket连接已断开，无法发送START消息", e);
                        handleEnd(session);
                    }
                }
                case "RECOGNIZE_START" -> handleRecognizeStart(wsChatSession);
                case "RECOGNIZE_STOP" -> handleRecognizeStop(wsChatSession);
                case "TTS_START" -> handleTtsStart(wsChatSession);
                case "TTS_STOP" -> handleTtsStop(wsChatSession);
                case "END" -> handleEnd(session);
                // Previously unknown types were silently ignored — surface them
                default -> log.warn("未知的消息类型：{}", type);
            }
        }catch (com.fasterxml.jackson.core.JsonProcessingException e){
            log.error("解析消息失败：{}", e.getMessage(), e);
            sendErrorMessage(session);
        }catch (Exception e){
            // Business validation (e.g. blank type via ThrowUtils) throws runtime
            // exceptions; report them to the client instead of letting them escape
            // to @OnError, which only logs.
            log.error("处理消息失败", e);
            sendErrorMessage(session);
        }
    }


    /**
     * Invoked when a binary (audio) message is received; buffers PCM frames and
     * forwards them to the ASR recognizer in chunks.
     *
     * @param message audio frame from the client
     * @param session the WebSocket session the frame arrived on
     */
    @OnMessage
    public void onMessage(ByteBuffer message, Session session) {
        log.info("收到二进制消息：{}", message);
        if (message == null || message.remaining() == 0){
            log.warn("收到空消息");
            return;
        }
        Long characterId = getCharacterId(session);
        // Guard against missing property / already-removed session (NPE otherwise)
        WsChatSessionV2 wsChatSession = characterId == null ? null : chatSessionMap.get(characterId);
        if (wsChatSession == null) {
            log.warn("未找到对应的会话，characterId: {}", characterId);
            return;
        }
        ByteBuffer asrByteBuffer = wsChatSession.getAsrByteBuffer();

        try {
            // Append the incoming audio to the shared buffer; synchronized because the
            // same buffer is drained by handleRecognizeStop on another thread.
            synchronized (asrByteBuffer) {
                if (asrByteBuffer.remaining() >= message.remaining()) {
                    // Enough room — just accumulate
                    asrByteBuffer.put(message);
                } else {
                    // Buffer full: flush what we have first
                    if (asrByteBuffer.position() > 0) {
                        asrByteBuffer.flip(); // switch to read mode
                        wsChatSession.getRecognition().sendAudioFrame(asrByteBuffer);
                        asrByteBuffer.clear(); // reset for writing
                    }
                    if (message.remaining() <= asrByteBuffer.capacity()) {
                        // Incoming chunk fits in the emptied buffer
                        asrByteBuffer.put(message);
                    } else {
                        // Chunk larger than the whole buffer — send it directly
                        wsChatSession.getRecognition().sendAudioFrame(message);
                    }
                }

                // Flush once the buffer crosses the 80% watermark
                if (asrByteBuffer.position() >= asrByteBuffer.capacity() * 0.8) {
                    asrByteBuffer.flip(); // switch to read mode
                    wsChatSession.getRecognition().sendAudioFrame(asrByteBuffer);
                    asrByteBuffer.clear(); // reset for writing
                }
            }
        } catch (Exception e) {
            log.error("处理语音数据时发生错误，characterId: {}", wsChatSession.getCharacterId(), e);
        }
    }


    /**
     * Invoked when the connection closes: releases resources from the inside out.
     */
    @OnClose
    public void onClose(Session session) {
        handleEnd(session);
    }

    /**
     * Invoked on a WebSocket error. Only logs; cleanup happens via onClose/handleEnd.
     */
    @OnError
    public void onError(Session session, Throwable error) {
        log.error("WebSocket发生错误", error);
    }

    /**
     * Ends the conversation: stops ASR/TTS, closes the WebSocket, then persists the
     * new messages and (on success) evicts the cached session.
     */
    private void handleEnd(Session session){
        log.warn("对话结束...");
        Long characterId = getCharacterId(session);
        if (characterId == null){
            return;
        }

        WsChatSessionV2 wsChatSession = chatSessionMap.get(characterId);

        if (wsChatSession == null) {
            return;
        }

        try {
            // 1. Stop speech recognition
            Recognition recognizer = wsChatSession.getRecognition();
            if (recognizer != null) {
                try {
                    recognizer.stop();
                    if (recognizer.getDuplexApi() != null) {
                        recognizer.getDuplexApi().close(1000, "bye");
                    }
                } catch (Exception e) {
                    log.warn("关闭语音识别器时出现异常: {}", e.getMessage());
                }
            }

            // 2. Stop speech synthesis
            SpeechSynthesizer synthesizer = wsChatSession.getSpeechSynthesizer();
            if (synthesizer != null) {
                try {
                    // Blocks until streaming synthesis completes
                    synthesizer.streamingComplete();
                    if (synthesizer.getDuplexApi() != null) {
                        synthesizer.getDuplexApi().close(1000, "bye");
                    }
                } catch (Exception e) {
                    log.warn("关闭语音合成器时出现异常: {}", e.getMessage());
                }
            }

            // 3. Close the WebSocket session
            try {
                if (session.isOpen()) {
                    session.close();
                }
            } catch (Exception e) {
                log.warn("关闭WebSocket会话时出现异常: {}", e.getMessage());
            }

            log.info("WebSocket连接关闭，characterId: {}", characterId);
        } catch (Exception e) {
            log.error("结束对话异常，characterId: {}", characterId, e);
        } finally {
            // Persist the messages produced during this conversation
            List<Message> newMessageList = wsChatSession.getNewMessageList();
            log.info("保存角色{}对话内容，共{}条", characterId , newMessageList.size());
            try {
                boolean isSuccess = false;
                if (CollectionUtil.isNotEmpty(newMessageList)){
                    isSuccess = chatHistoryService.saveBatch(
                            MessageChatHistoryConverter.messageListToChatHistoryList(newMessageList,
                                    wsChatSession.getChatId(),
                                    characterId));
                }
                // Only evict on successful save so an unsaved conversation can retry
                if (isSuccess){
                    // Use the already-resolved characterId (was a redundant re-lookup)
                    chatSessionMap.remove(characterId);
                    // Invalidate the character-detail cache
                    characterInfoService.clearCharacterDetailCache(characterId);
                }
            }catch (Exception e){
                log.error("保存对话内容异常", e);
            }
        }
    }



    /**
     * Starts text-to-speech: streams the LLM reply and feeds each chunk into the
     * synthesizer, then records the full assistant message in the history.
     */
    private void handleTtsStart(WsChatSessionV2 wsChatSession){
        log.info("[语音合成] 开始进行语音合成");
        List<Message> messageList = wsChatSession.getMessageList();
        if (CollectionUtil.isEmpty(messageList)){
            log.warn("[语音合成] 角色{}没有对话内容，无法进行语音合成", wsChatSession.getCharacterId());
            return;
        }

        wsChatSession.setAllowAiSpeech(true);
        GenerationParam param = GenerationParam.builder()
                .apiKey(aiConfig.getDashscope().getApiKey())
                .model(aiConfig.getDashscope().getChatModel())
                .messages(messageList)
                .resultFormat(GenerationParam.ResultFormat.MESSAGE)
                .incrementalOutput(true) // incremental (streaming) output
                .build();
        Generation gen = wsChatSession.getGeneration();
        SpeechSynthesizer synthesizer = wsChatSession.getSpeechSynthesizer();

        // Refresh the callback for every synthesis round
        synthesizer.updateParamAndCallback(wsChatSession.getSpeechSynthesisParam(), getTtsCallback(wsChatSession));

        try {
            Flowable<GenerationResult> result = gen.streamCall(param);
            StringBuilder fullContent = new StringBuilder();
            result
                    .subscribeOn(Schedulers.io()) // run the request on an IO thread
                    .observeOn(Schedulers.computation()) // process responses on a computation thread
                    .subscribe(
                            // onNext: handle each response chunk
                            message -> {
                                try {
                                    String content = message.getOutput().getChoices().get(0).getMessage().getContent();
                                    String finishReason = message.getOutput().getChoices().get(0).getFinishReason();

                                    // Synthesize only while the client still allows AI speech
                                    if (StrUtil.isNotBlank(content)
                                            && !content.trim().isEmpty()
                                            && wsChatSession.allowAiSpeech()) {
                                        log.info("响应内容：{}", content.trim());
                                        synthesizer.streamingCall(content);
                                        fullContent.append(content);
                                    }

                                    // A non-null finishReason marks the final chunk; log usage
                                    if (finishReason != null && !"null".equals(finishReason)) {
                                        log.info("--- 请求用量 ---");
                                        log.info("输入 Tokens：{}", message.getUsage().getInputTokens());
                                        log.info("输出 Tokens：{}", message.getUsage().getOutputTokens());
                                        log.info("总 Tokens：{}", message.getUsage().getTotalTokens());
                                    }
                                } catch (Exception e) {
                                    log.error("处理消息片段异常", e);
                                }
                            },
                            // onError
                            error -> {
                                log.error("请求失败: {}", error.getMessage(), error);
                            },
                            // onComplete: flush synthesis and record the assistant turn
                            () -> {
                                // Blocks until streaming synthesis drains
                                synthesizer.streamingComplete();
                                Message assiantMessage = Message.builder()
                                        .role(Role.ASSISTANT.getValue())
                                        .content(fullContent.toString())
                                        .build();
                                messageList.add(assiantMessage);
                                wsChatSession.getNewMessageList().add(assiantMessage);
                                log.info("完整响应: {}", fullContent.toString());
                            }
                    );
        } catch (Exception e) {
            log.error("流式推送异常: {}", e.getMessage(), e);
        }
    }


    /**
     * Stops text-to-speech by draining the streaming synthesizer.
     */
    private void handleTtsStop(WsChatSessionV2 wsChatSession){
        log.info("[语音合成] 停止进行语音合成");
        SpeechSynthesizer synthesizer = wsChatSession.getSpeechSynthesizer();
        // Blocks until streaming synthesis completes
        synthesizer.streamingComplete();
    }


    /**
     * Starts speech recognition. Also implements barge-in: AI speech is muted
     * (allowAiSpeech=false) while the user is talking; the TTS callback checks this
     * flag before sending audio frames.
     */
    private void handleRecognizeStart(WsChatSessionV2 wsChatSession){
        log.info("[语音识别] 开始进行语音识别");
        // Mute AI playback while listening to the user; getTtsCallback honours this flag
        wsChatSession.setAllowAiSpeech(false);

        Recognition recognizer = wsChatSession.getRecognition();
        recognizer.call(wsChatSession.getRecognitionParam(), getAsrCallback(wsChatSession));
    }


    /**
     * Stops speech recognition: flushes any buffered audio, then stops the recognizer.
     */
    private void handleRecognizeStop(WsChatSessionV2 wsChatSession){
        log.info("[语音识别] 停止语音识别");
        Recognition recognizer = wsChatSession.getRecognition();
        ByteBuffer buffer = wsChatSession.getAsrByteBuffer();

        try{
            if (recognizer != null && buffer != null) {
                // Flush whatever is still buffered
                synchronized (buffer) {
                    if (buffer.position() > 0) {
                        buffer.flip(); // switch to read mode
                        recognizer.sendAudioFrame(buffer);
                        buffer.clear(); // reset for writing
                    }
                }
            }
        }catch (Exception e){
            log.error("处理语音识别停止请求时发生错误，characterId: {}", wsChatSession.getCharacterId(), e);
        }finally {
            // Always stop the recognizer, even if flushing failed
            if (recognizer != null){
                try{
                    recognizer.stop();
                }catch (ApiException e){
                    log.error("停止语音识别失败: {}", e.getMessage(), e);
                }
            }
        }
    }

    /**
     * Reads the characterId stored on the session by onOpen; may be null.
     */
    private Long getCharacterId(Session session) {
        return (Long) session.getUserProperties().get("characterId");
    }

    /**
     * Sends an ERROR text frame to the client, best-effort.
     */
    private void sendErrorMessage(Session session) {
        try {
            if (session.isOpen()){
                session.getBasicRemote().sendText("ERROR");
            }
        } catch (IOException e) {
            log.error("发送错误消息失败: {}", e.getMessage(), e);
        }
    }

    /**
     * Builds the speech-synthesis (TTS) parameters for this session's voice.
     *
     * @return configured {@link SpeechSynthesisParam}
     */
    private SpeechSynthesisParam getTtsParam(WsChatSessionV2 wsChatSessionV2) {
        return SpeechSynthesisParam.builder()
                .apiKey(aiConfig.getDashscope().getApiKey())
                .model(aiConfig.getDashscope().getSsModel())
                .voice(wsChatSessionV2.getVoiceId())
                .format(SpeechSynthesisAudioFormat.PCM_22050HZ_MONO_16BIT)
                .build();
    }

    /**
     * Builds the speech-recognition (ASR) parameters: 16 kHz PCM, zh/en hints.
     *
     * @return configured {@link RecognitionParam}
     */
    private RecognitionParam getAsrParam() {
        return RecognitionParam.builder()
                .apiKey(aiConfig.getDashscope().getApiKey())
                .model(aiConfig.getDashscope().getAsrModel())
                .format("pcm")
                .sampleRate(16000)
                .parameter("language_hints", new String[]{"zh", "en"})
                .build();
    }

    /**
     * Builds the ASR callback: appends each finished sentence to the chat history
     * as a USER message.
     *
     * @return callback invoked by the DashScope recognizer
     */
    private ResultCallback<RecognitionResult> getAsrCallback(WsChatSessionV2 wsChatSession) {
        List<Message> messageList = wsChatSession.getMessageList();
        Session session = wsChatSession.getSession();
        return new ResultCallback<RecognitionResult>() {
            @Override
            public void onEvent(RecognitionResult message) {
                // Ignore results once the websocket is gone
                if (!session.isOpen()) {
                    log.warn("WebSocket session is closed, cannot process recognition result");
                    return;
                }

                if (message.isSentenceEnd()) {
                    log.info("[语音识别] 语音识别最终结果: {} ", message.getSentence().getText());
                    Message userMessage = Message.builder()
                            .role(Role.USER.getValue())
                            .content(message.getSentence().getText())
                            .build();
                    messageList.add(userMessage);
                    wsChatSession.getNewMessageList().add(userMessage);
                } else {
                    // Intermediate (partial) result
                    log.info("[语音识别] 过程语音识别结果: {} ", message.getSentence().getText());
                }
            }

            @Override
            public void onComplete() {
                log.info("[语音识别] 语音识别完成 ");
                wsChatSession.setAsrSessionReady(true);
            }

            @Override
            public void onError(Exception e) {
                log.error("[语音识别] 语音识别错误: {} ", e.getMessage(), e);
                // Mark ready even on error so the session is not left stuck
                wsChatSession.setAsrSessionReady(true);
            }
        };
    }


    /**
     * Builds the TTS callback: forwards synthesized audio frames to the client as
     * binary WebSocket messages, gated by the allowAiSpeech flag (barge-in support).
     *
     * @return callback invoked by the DashScope synthesizer
     */
    private ResultCallback<SpeechSynthesisResult> getTtsCallback(WsChatSessionV2 wsChatSession) {
        Object ttsLock = wsChatSession.getTtsLock();
        Session session = wsChatSession.getSession();
        return new ResultCallback<SpeechSynthesisResult>() {
            @Override
            public void onEvent(SpeechSynthesisResult result) {
                // Skip when the socket is closed or the user is speaking (barge-in)
                if (!session.isOpen() || ttsLock == null || !wsChatSession.allowAiSpeech()) {
                    return;
                }

                if (result.getAudioFrame() != null){
                    try{
                        ByteBuffer audioFrame = result.getAudioFrame();
                        // Serialize sends to avoid interleaved writes on the remote endpoint
                        synchronized (ttsLock) {
                            // Re-check under the lock: the flag may have flipped meanwhile
                            if (session.isOpen() && wsChatSession.allowAiSpeech()){
                                try {
                                    // remaining() is the actual payload size (capacity may overstate it)
                                    log.info("[语音合成] 发送音频数据大小: {}", audioFrame.remaining());
                                    session.getBasicRemote().sendBinary(audioFrame);
                                } catch (IOException e) {
                                    // Connection lost mid-send; just log
                                    log.warn("WebSocket connection lost: {}", e.getMessage());
                                }
                            }
                        }
                    }catch (Exception e){
                        log.error("发送音频数据失败", e);
                    }
                }
            }

            @Override
            public void onComplete() {
                log.info("[语音合成] 语音合成完成");
            }

            @Override
            public void onError(Exception e) {
                log.error("[语音合成] 语音合成错误: {} ", e.getMessage(), e);
            }
        };
    }
}


