package cn.iocoder.yudao.module.ai.websocket;

import cn.iocoder.yudao.framework.common.util.json.JsonUtils;
import cn.iocoder.yudao.framework.websocket.core.listener.WebSocketMessageListener;
import cn.iocoder.yudao.framework.websocket.core.message.JsonWebSocketMessage;
import cn.iocoder.yudao.module.ai.controller.admin.voice.vo.AiVoiceTranscriptionRespVO;
import cn.iocoder.yudao.module.ai.controller.admin.voice.vo.AiVoiceWebSocketMessage;
import cn.iocoder.yudao.module.ai.service.voice.AiVoiceService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;

import jakarta.annotation.PreDestroy;
import jakarta.annotation.Resource;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * AI streaming speech-to-text WebSocket message listener.
 * Supports real-time audio stream upload and incremental recognition: audio
 * chunks are accumulated per session and flushed to the recognition service
 * either periodically or as soon as the buffer grows large enough.
 *
 * @author 芋道源码
 */
@Component
@Slf4j
public class AiVoiceStreamWebSocketListener implements WebSocketMessageListener<AiVoiceWebSocketMessage> {

    @Resource
    private AiVoiceService aiVoiceService;

    /**
     * WebSocket message type handled by this listener.
     */
    private static final String MESSAGE_TYPE = "ai-voice-transcription";

    /**
     * When the buffered audio exceeds this many characters (Base64 text),
     * recognition is triggered immediately instead of waiting for the timer.
     */
    private static final int IMMEDIATE_PROCESS_THRESHOLD = 50_000;

    /**
     * Interval, in seconds, of the periodic task that flushes accumulated audio.
     */
    private static final long PROCESS_INTERVAL_SECONDS = 2;

    /**
     * Per-session audio buffers.
     * key: sessionId, value: accumulated audio state
     */
    private final Map<String, AudioBuffer> audioBufferMap = new ConcurrentHashMap<>();

    /**
     * Pool running the (potentially slow) speech-recognition calls off the
     * WebSocket I/O thread.
     */
    private final ExecutorService executorService = Executors.newFixedThreadPool(10);

    /**
     * Scheduler for the periodic flush tasks and the delayed "finished" message.
     */
    private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(5);

    /**
     * Mutable audio accumulation state for one WebSocket session.
     * All access to {@link #dataBuffer} (including {@code length()}) must hold
     * the monitor of {@code dataBuffer} — {@link StringBuilder} is not thread-safe.
     */
    private static class AudioBuffer {
        private final StringBuilder dataBuffer = new StringBuilder();
        // Timestamp of the most recent chunk; currently diagnostic only.
        private volatile long lastUpdateTime = System.currentTimeMillis();
        private volatile int chunkCount = 0;
        // Guards against overlapping recognition tasks for the same buffer
        // (compareAndSet replaces the racy volatile check-then-act).
        private final AtomicBoolean processing = new AtomicBoolean(false);
        private String language = "zh";
        private String format = "webm";
        private volatile ScheduledFuture<?> processingTask;
    }

    @Override
    public void onMessage(WebSocketSession session, AiVoiceWebSocketMessage message) {
        String sessionId = session.getId();

        try {
            switch (message.getType()) {
                case "init":
                    handleInit(session, message);
                    break;
                case "start":
                    handleStart(session, message);
                    break;
                case "audio":
                    handleAudioStream(session, message);
                    break;
                case "stop":
                    handleStop(session);
                    break;
                case "pause":
                    handlePause(session);
                    break;
                case "resume":
                    handleResume(session);
                    break;
                default:
                    log.warn("[onMessage] 未知的消息类型: {}, sessionId: {}", message.getType(), sessionId);
            }
        } catch (Exception e) {
            log.error("[onMessage] 处理消息失败, sessionId: {}", sessionId, e);
            sendError(session, "处理失败: " + e.getMessage());
        }
    }

    /**
     * Handles the "init" handshake: acknowledges the connection so the client
     * knows the recognition service is reachable.
     */
    private void handleInit(WebSocketSession session, AiVoiceWebSocketMessage message) {
        String sessionId = session.getId();
        log.info("[handleInit] 初始化连接, sessionId: {}", sessionId);

        sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                .text("WebSocket 连接已建立，实时语音识别服务就绪")
                .finished(false)
                .build());
    }

    /**
     * Handles the "start" message: creates a fresh buffer for the session and
     * starts the periodic flush task. A repeated "start" replaces the previous
     * buffer, so the old periodic task is cancelled to avoid leaking it.
     */
    private void handleStart(WebSocketSession session, AiVoiceWebSocketMessage message) {
        String sessionId = session.getId();

        AudioBuffer buffer = new AudioBuffer();
        buffer.language = message.getLanguage() != null ? message.getLanguage() : "zh";
        buffer.format = message.getFormat() != null ? message.getFormat() : "webm";

        // Cancel the periodic task of any buffer this "start" replaces.
        AudioBuffer previous = audioBufferMap.put(sessionId, buffer);
        if (previous != null && previous.processingTask != null) {
            previous.processingTask.cancel(false);
        }

        // Flush accumulated audio every PROCESS_INTERVAL_SECONDS seconds.
        buffer.processingTask = scheduledExecutor.scheduleAtFixedRate(
                () -> processAccumulatedAudio(session, buffer),
                PROCESS_INTERVAL_SECONDS, PROCESS_INTERVAL_SECONDS, TimeUnit.SECONDS);

        log.info("[handleStart] 开始实时语音识别, sessionId: {}, language: {}",
                sessionId, buffer.language);

        sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                .text("开始实时语音识别...")
                .finished(false)
                .build());
    }

    /**
     * Handles an "audio" message: appends the chunk to the session buffer and
     * triggers an immediate flush when the buffer grows past the threshold.
     */
    private void handleAudioStream(WebSocketSession session, AiVoiceWebSocketMessage message) {
        String sessionId = session.getId();
        AudioBuffer buffer = audioBufferMap.get(sessionId);

        if (buffer == null) {
            log.warn("[handleAudioStream] 未找到音频缓冲区, sessionId: {}", sessionId);
            sendError(session, "请先发送 start 消息");
            return;
        }

        String audioData = message.getData();
        if (audioData == null) {
            return;
        }

        // Append under the buffer lock and capture the new length there, so it
        // is never read while another thread mutates the StringBuilder.
        int bufferedLength;
        synchronized (buffer.dataBuffer) {
            buffer.dataBuffer.append(audioData);
            buffer.lastUpdateTime = System.currentTimeMillis();
            buffer.chunkCount++;
            bufferedLength = buffer.dataBuffer.length();
        }

        log.debug("[handleAudioStream] 接收音频块 #{}, sessionId: {}, 缓冲区大小: {}",
                buffer.chunkCount, sessionId, bufferedLength);

        // Large backlog: recognize now rather than waiting for the timer.
        if (bufferedLength > IMMEDIATE_PROCESS_THRESHOLD) {
            processAccumulatedAudio(session, buffer);
        }
    }

    /**
     * Map-lookup variant used by callers that only hold the session.
     * No-op when the session has no active buffer.
     */
    private void processAccumulatedAudio(WebSocketSession session) {
        AudioBuffer buffer = audioBufferMap.get(session.getId());
        if (buffer != null) {
            processAccumulatedAudio(session, buffer);
        }
    }

    /**
     * Drains the accumulated audio of {@code buffer} and submits it to the
     * recognition service asynchronously. At most one recognition task runs
     * per buffer at a time (enforced via {@code processing.compareAndSet}).
     * Taking the buffer as a parameter (instead of re-fetching it from the
     * map) lets {@link #handleStop} flush the final chunk after the buffer
     * has already been removed from {@link #audioBufferMap}.
     */
    private void processAccumulatedAudio(WebSocketSession session, AudioBuffer buffer) {
        String sessionId = session.getId();

        // Cheap pre-check: nothing buffered, nothing to do.
        synchronized (buffer.dataBuffer) {
            if (buffer.dataBuffer.length() == 0) {
                return;
            }
        }

        // Atomic claim: losers simply skip; the running task will pick up any
        // audio appended meanwhile on the next flush.
        if (!buffer.processing.compareAndSet(false, true)) {
            return;
        }

        executorService.submit(() -> {
            try {
                String audioData;
                synchronized (buffer.dataBuffer) {
                    if (buffer.dataBuffer.length() == 0) {
                        return; // flag reset in finally
                    }
                    // Snapshot and clear the buffered audio.
                    audioData = buffer.dataBuffer.toString();
                    buffer.dataBuffer.setLength(0);
                }

                log.info("[processAccumulatedAudio] 处理音频数据, sessionId: {}, 数据长度: {}",
                        sessionId, audioData.length());

                // Delegate the actual speech-to-text work to the service layer.
                String text = aiVoiceService.transcribeAudioBase64(
                        audioData,
                        buffer.format,
                        buffer.language
                );

                // Push a partial (finished=false) result to the client,
                // filtering out empty / "no text recognized" placeholder output.
                if (text != null && !text.isEmpty() && !text.contains("未识别到文本")) {
                    sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                            .text(text)
                            .finished(false)
                            .build());

                    log.info("[processAccumulatedAudio] 识别结果: {}, sessionId: {}",
                            text.substring(0, Math.min(text.length(), 50)), sessionId);
                }

            } catch (Exception e) {
                log.error("[processAccumulatedAudio] 处理失败, sessionId: {}", sessionId, e);
                sendError(session, "识别失败: " + e.getMessage());
            } finally {
                buffer.processing.set(false);
            }
        });
    }

    /**
     * Handles the "stop" message: cancels the periodic task, flushes the
     * remaining audio, and sends a delayed finished=true marker.
     * The trailing flush passes the buffer directly — the previous
     * implementation called the map-lookup variant after removing the buffer
     * from the map, so the final audio chunk was silently dropped.
     */
    private void handleStop(WebSocketSession session) {
        String sessionId = session.getId();
        AudioBuffer buffer = audioBufferMap.remove(sessionId);

        if (buffer == null) {
            log.warn("[handleStop] 未找到音频数据, sessionId: {}", sessionId);
            return;
        }

        if (buffer.processingTask != null) {
            buffer.processingTask.cancel(false);
        }

        log.info("[handleStop] 停止语音识别, sessionId: {}, 共接收 {} 个音频块",
                sessionId, buffer.chunkCount);

        // Flush whatever audio is still buffered; must use the buffer-taking
        // overload since the buffer is no longer reachable via the map.
        processAccumulatedAudio(session, buffer);

        // Delay the completion marker to give the final flush time to finish.
        scheduledExecutor.schedule(() -> {
            sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                    .text("")
                    .finished(true)
                    .build());
        }, 1, TimeUnit.SECONDS);
    }

    /**
     * Handles the "pause" message: stops the periodic flush task while keeping
     * the buffer (and any already-received audio) intact for a later resume.
     */
    private void handlePause(WebSocketSession session) {
        String sessionId = session.getId();
        AudioBuffer buffer = audioBufferMap.get(sessionId);

        if (buffer != null && buffer.processingTask != null) {
            buffer.processingTask.cancel(false);
            log.info("[handlePause] 暂停语音识别, sessionId: {}", sessionId);

            sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                    .text("已暂停")
                    .finished(false)
                    .build());
        }
    }

    /**
     * Handles the "resume" message: restarts the periodic flush task.
     * Any still-running task is cancelled first so a duplicate "resume"
     * cannot leak a second scheduled task for the same buffer.
     */
    private void handleResume(WebSocketSession session) {
        String sessionId = session.getId();
        AudioBuffer buffer = audioBufferMap.get(sessionId);

        if (buffer != null) {
            if (buffer.processingTask != null) {
                buffer.processingTask.cancel(false);
            }
            buffer.processingTask = scheduledExecutor.scheduleAtFixedRate(
                    () -> processAccumulatedAudio(session, buffer),
                    PROCESS_INTERVAL_SECONDS, PROCESS_INTERVAL_SECONDS, TimeUnit.SECONDS);

            log.info("[handleResume] 恢复语音识别, sessionId: {}", sessionId);

            sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                    .text("已恢复")
                    .finished(false)
                    .build());
        }
    }

    /**
     * Serializes {@code resp} into the project's JSON WebSocket envelope and
     * sends it on the session. Silently skips closed sessions.
     */
    private void sendMessage(WebSocketSession session, AiVoiceTranscriptionRespVO resp) {
        if (session == null || !session.isOpen()) {
            log.warn("[sendMessage] Session 为空或已关闭");
            return;
        }

        try {
            // Stamp the response so the client can order partial results.
            resp.setTimestamp(System.currentTimeMillis());

            // Wrap the payload in the framework's typed message envelope.
            JsonWebSocketMessage message = new JsonWebSocketMessage();
            message.setType(MESSAGE_TYPE);
            message.setContent(JsonUtils.toJsonString(resp));

            String payload = JsonUtils.toJsonString(message);
            session.sendMessage(new TextMessage(payload));

        } catch (IOException e) {
            log.error("[sendMessage] 发送消息失败", e);
        }
    }

    /**
     * Sends a terminal (finished=true) error response to the client.
     */
    private void sendError(WebSocketSession session, String error) {
        sendMessage(session, AiVoiceTranscriptionRespVO.builder()
                .error(error)
                .finished(true)
                .build());
    }

    @Override
    public String getType() {
        return MESSAGE_TYPE;
    }

    /**
     * Releases the per-session resources; intended to be invoked when the
     * WebSocket session closes.
     */
    public void cleanup(WebSocketSession session) {
        String sessionId = session.getId();
        AudioBuffer buffer = audioBufferMap.remove(sessionId);

        if (buffer != null && buffer.processingTask != null) {
            buffer.processingTask.cancel(true);
        }

        log.info("[cleanup] 清理会话资源, sessionId: {}", sessionId);
    }

    /**
     * Shuts down the executors when the Spring container destroys this bean.
     * Previously the pools were never shut down, leaking their threads.
     */
    @PreDestroy
    public void shutdown() {
        scheduledExecutor.shutdownNow();
        executorService.shutdown();
        try {
            if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
                executorService.shutdownNow();
            }
        } catch (InterruptedException e) {
            executorService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
}
