package cn.iocoder.yudao.module.ai.websocket;

import cn.hutool.core.codec.Base64;
import cn.hutool.core.util.StrUtil;
import cn.iocoder.yudao.framework.common.util.json.JsonUtils;
import cn.iocoder.yudao.framework.websocket.core.listener.WebSocketMessageListener;
import cn.iocoder.yudao.framework.websocket.core.message.JsonWebSocketMessage;
import cn.iocoder.yudao.module.ai.service.voice.AiVoiceService;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;

import jakarta.annotation.Resource;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/**
 * 简单语音识别 WebSocket 监听器
 * 接收音频数据，缓冲到一定大小后自动识别并返回结果
 *
 * @author 芋道源码
 */
@Component
@Slf4j
public class SimpleVoiceWebSocketListener implements WebSocketMessageListener<SimpleVoiceWebSocketListener.SimpleVoiceMessage> {

    @Resource
    private AiVoiceService aiVoiceService;

    /**
     * 消息类型
     */
    private static final String MESSAGE_TYPE = "simple-voice";

    /**
     * 缓冲区大小阈值（120KB）
     */
    private static final int BUFFER_THRESHOLD = 20 * 1024;

    /**
     * 每个会话的音频缓冲区
     */
    private final Map<String, AudioBuffer> bufferMap = new ConcurrentHashMap<>();

    /**
     * 简单消息结构
     */
    @Data
    public static class SimpleVoiceMessage {
        private String audio;     // Base64 音频数据
        private String language;  // 语言（默认 zh）
    }

    /**
     * 音频缓冲区
     */
    private static class AudioBuffer {
        private final StringBuilder data = new StringBuilder();
        private int byteSize = 0;
        private String language = "zh";
        private long lastUpdateTime = System.currentTimeMillis();
    }

    @Override
    public void onMessage(WebSocketSession session, SimpleVoiceMessage message) {
        String sessionId = session.getId();
        
        try {
            // 获取或创建缓冲区
            AudioBuffer buffer = bufferMap.computeIfAbsent(sessionId, k -> new AudioBuffer());
            
            // 更新语言设置
            if (StrUtil.isNotBlank(message.getLanguage())) {
                buffer.language = message.getLanguage();
            }
            
            // 处理音频数据
            if (StrUtil.isNotBlank(message.getAudio())) {
                processAudio(session, buffer, message.getAudio());
            }
            
        } catch (Exception e) {
            log.error("[onMessage] 处理失败, sessionId: {}", sessionId, e);
            sendError(session, "处理失败: " + e.getMessage());
        }
    }

    /**
     * 处理音频数据
     */
    private void processAudio(WebSocketSession session, AudioBuffer buffer, String audioData) {
        String sessionId = session.getId();
        
        // 累积音频数据
        buffer.data.append(audioData);
        
        // 计算大致的字节大小（Base64 编码后大约是原始数据的 4/3）
        if (audioData.startsWith("data:")) {
            audioData = audioData.substring(audioData.indexOf(",") + 1);
        }
        buffer.byteSize += (audioData.length() * 3 / 4);
        buffer.lastUpdateTime = System.currentTimeMillis();
        
        log.debug("[processAudio] 接收音频, sessionId: {}, 缓冲区大小: {} bytes", sessionId, buffer.byteSize);
        
        // 检查是否达到阈值
        if (buffer.byteSize >= BUFFER_THRESHOLD) {
            recognizeAndSend(session, buffer);
        } else {
            // 如果超过5秒没有新数据，也进行识别
            scheduleTimeoutRecognition(session, buffer);
        }
    }

    /**
     * 识别并发送结果
     */
    private void recognizeAndSend(WebSocketSession session, AudioBuffer buffer) {
        String sessionId = session.getId();
        
        // 如果缓冲区为空，不处理
        if (buffer.data.length() == 0) {
            return;
        }
        
        try {
            // 提取音频数据
            String audioData = buffer.data.toString();
            
            // 清空缓冲区
            buffer.data.setLength(0);
            buffer.byteSize = 0;
            
            log.info("[recognizeAndSend] 开始识别, sessionId: {}, 数据长度: {}", 
                    sessionId, audioData.length());
            
            // 调用识别服务
            String text = aiVoiceService.transcribeAudioBase64(audioData, "webm", buffer.language);
            
            // 发送结果
            if (StrUtil.isNotBlank(text) && !text.contains("未识别到文本")) {
                sendResult(session, text);
                log.info("[recognizeAndSend] 识别成功: {}", text);
            }
            
        } catch (Exception e) {
            log.error("[recognizeAndSend] 识别失败, sessionId: {}", sessionId, e);
        }
    }

    /**
     * 定时识别（超时处理）
     */
    private void scheduleTimeoutRecognition(WebSocketSession session, AudioBuffer buffer) {
        // 简单的超时处理：如果5秒没有新数据，处理现有数据
        new Thread(() -> {
            try {
                Thread.sleep(5000);
                
                // 检查是否有新数据
                if (System.currentTimeMillis() - buffer.lastUpdateTime >= 5000 && !buffer.data.isEmpty()) {
                    recognizeAndSend(session, buffer);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();
    }

    /**
     * 发送识别结果
     */
    private void sendResult(WebSocketSession session, String text) {
        if (session == null || !session.isOpen()) {
            return;
        }
        
        try {
            // 构造响应消息
            JsonWebSocketMessage message = new JsonWebSocketMessage();
            message.setType(MESSAGE_TYPE);
            message.setContent(JsonUtils.toJsonString(Map.of(
                "text", text,
                "timestamp", System.currentTimeMillis()
            )));
            
            // 发送消息
            String payload = JsonUtils.toJsonString(message);
            session.sendMessage(new TextMessage(payload));
            
        } catch (IOException e) {
            log.error("[sendResult] 发送失败", e);
        }
    }

    /**
     * 发送错误消息
     */
    private void sendError(WebSocketSession session, String error) {
        if (session == null || !session.isOpen()) {
            return;
        }
        
        try {
            JsonWebSocketMessage message = new JsonWebSocketMessage();
            message.setType(MESSAGE_TYPE);
            message.setContent(JsonUtils.toJsonString(Map.of(
                "error", error,
                "timestamp", System.currentTimeMillis()
            )));
            
            String payload = JsonUtils.toJsonString(message);
            session.sendMessage(new TextMessage(payload));
            
        } catch (IOException e) {
            log.error("[sendError] 发送失败", e);
        }
    }

    @Override
    public String getType() {
        return MESSAGE_TYPE;
    }

    /**
     * 会话关闭时清理资源
     */
    public void cleanup(WebSocketSession session) {
        String sessionId = session.getId();
        AudioBuffer buffer = bufferMap.remove(sessionId);
        
        // 处理剩余的数据
        if (buffer != null && buffer.data.length() > 0) {
            recognizeAndSend(session, buffer);
        }
        
        log.info("[cleanup] 清理会话资源, sessionId: {}", sessionId);
    }
}
