package com.qny.ai.websocket;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.qny.ai.service.impl.KodoStorageService;
import com.qny.ai.service.impl.QiniuVoiceService;
import com.qny.ai.service.StreamingLLMService;
import com.qny.ai.service.StreamingASRService;
import com.qny.ai.service.StreamingTTSService;
import jakarta.annotation.Resource;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.messages.SystemMessage;
import org.springframework.ai.chat.messages.AssistantMessage;
import com.qny.ai.mapper.ConversationMapper;
import com.qny.ai.mapper.MessageMapper;
import com.qny.ai.mapper.AppRoleMapper;
import com.qny.ai.entity.Conversation;
import com.qny.ai.entity.Message;
import com.qny.ai.entity.AppRole;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.CloseStatus;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;
import org.springframework.web.socket.handler.TextWebSocketHandler;
import org.springframework.util.StringUtils;

import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

@Slf4j
@Component
public class VoiceHandler extends TextWebSocketHandler {

    /** Shared JSON (de)serializer; Jackson's ObjectMapper is thread-safe once configured. */
    private final ObjectMapper mapper = new ObjectMapper();
    /** All currently open WebSocket sessions, keyed by session id. */
    private final Map<String, WebSocketSession> sessions = new ConcurrentHashMap<>();
    private final QiniuVoiceService qiniuVoiceService;   // non-streaming ASR/TTS fallback service
    private final KodoStorageService kodoStorageService; // uploads raw audio bytes to Kodo object storage
    private final ChatClient chatClient;                 // non-streaming LLM fallback client
    // NOTE(review): field injection here while every other dependency uses constructor
    // injection — consider moving this into the constructor for consistency/testability.
    @Resource
    private StreamingLLMService streamingLLMService;
    private final StreamingASRService streamingASRService;
    private final StreamingTTSService streamingTTSService;
    private final ConversationMapper conversationMapper; // DB access: conversations
    private final MessageMapper messageMapper;           // DB access: chat messages
    private final AppRoleMapper appRoleMapper;           // DB access: AI role config (default voice, etc.)
    /** Per-session accumulator of base64 audio chunks (audio_start/audio_chunk/audio_end flow). */
    private final Map<String, StringBuilder> sessionAudioBuf = new ConcurrentHashMap<>();
    
    // Voice-call signaling state
    private final Map<String, String> callSessions = new ConcurrentHashMap<>(); // sessionId -> callId
    private final Map<String, CallInfo> activeCalls = new ConcurrentHashMap<>(); // callId -> CallInfo

    /**
     * Creates the handler with all collaborating services injected via the constructor.
     *
     * @param qiniuVoiceService  non-streaming ASR/TTS fallback
     * @param kodoStorageService object storage used to upload audio for URL-based ASR
     * @param chatClient         non-streaming LLM fallback
     * @param streamingASRService streaming speech-to-text pipeline
     * @param streamingTTSService streaming text-to-speech pipeline
     * @param conversationMapper  conversation persistence
     * @param messageMapper       message persistence
     * @param appRoleMapper       AI role configuration lookup
     */
    public VoiceHandler(QiniuVoiceService qiniuVoiceService,
                        KodoStorageService kodoStorageService,
                        ChatClient chatClient,
                        StreamingASRService streamingASRService,
                        StreamingTTSService streamingTTSService,
                        ConversationMapper conversationMapper,
                        MessageMapper messageMapper,
                        AppRoleMapper appRoleMapper) {
        // External AI/storage services
        this.qiniuVoiceService = qiniuVoiceService;
        this.kodoStorageService = kodoStorageService;
        this.chatClient = chatClient;
        this.streamingASRService = streamingASRService;
        this.streamingTTSService = streamingTTSService;
        // Persistence layer
        this.conversationMapper = conversationMapper;
        this.messageMapper = messageMapper;
        this.appRoleMapper = appRoleMapper;
    }

    /**
     * Registers a newly opened WebSocket session so subsequent handlers can
     * look it up by its session id.
     *
     * @param session the freshly established WebSocket session
     */
    @Override
    public void afterConnectionEstablished(WebSocketSession session) {
        sessions.put(session.getId(), session);
        // Parameterized SLF4J logging: avoids eager string concatenation when INFO is disabled.
        log.info("[VOICE][OPEN] session={}", session.getId());
    }

    @Override
    protected void handleTextMessage(WebSocketSession session, TextMessage message) throws Exception {
        JsonNode node = mapper.readTree(message.getPayload());
        String type = node.path("type").asText("");
        if ("ping".equals(type)) {
            log.info("[VOICE][PING] session=" + session.getId());
            session.sendMessage(new TextMessage("{\"type\":\"pong\"}"));
            return;
        }
        
        // 语音通话信令处理
        if (handleCallSignaling(session, node)) {
            return;
        }

        if ("audio_blob".equals(type)) {
            // 非流式：一次性音频（统一使用WAV格式）
            String mime = node.path("mime").asText("audio/wav"); // 统一使用WAV格式
            String base64 = node.path("payload").asText("");
            String convId = node.path("conversationId").asText("");
            log.info("[VOICE][AUDIO_BLOB] session=" + session.getId() + ", conv=" + convId + ", mime=" + mime + ", sizeB64=" + (base64 != null ? base64.length() : 0));
            // 第一步：先返回占位的识别与回复文本
            String asrText = "占位：识别到的文本";
            ObjectNode asr = mapper.createObjectNode();
            asr.put("type", "asr_final");
            asr.put("text", asrText);
            session.sendMessage(new TextMessage(asr.toString()));
            log.info("[VOICE][ASR_FINAL] -> " + asr.toString());

            String llmText = "占位：AI 的回复";
            ObjectNode llm = mapper.createObjectNode();
            llm.put("type", "llm_text");
            llm.put("text", llmText);
            session.sendMessage(new TextMessage(llm.toString()));
            log.info("[VOICE][LLM_TEXT] -> " + llm.toString());

            // 使用流式ASR服务处理音频识别
            try {
                // base64 -> bytes
                byte[] bytes = java.util.Base64.getDecoder().decode(base64);
                String ext = mime.contains("webm") ? "webm" : (mime.contains("ogg") ? "ogg" : "dat");
                String asrFormat = ext.equals("ogg") ? "ogg" : (ext.equals("webm") ? "ogg" : ext);
                
                // 使用流式ASR服务
                streamingASRService.processStreamingASR(bytes, asrFormat, session, session.getId())
                    .thenAccept(result -> {
                        if (result != null && !result.trim().isEmpty()) {
                            log.info("[VOICE][STREAMING_ASR_OK] text=" + result);
                        }
                    })
                    .exceptionally(throwable -> {
                        log.info("[VOICE][STREAMING_ASR_FAIL] " + throwable.getMessage());
                        return null;
                    });
                
                // 回退到传统ASR处理
                String url = kodoStorageService.uploadBytes(bytes, ext);
                log.info("[VOICE][UPLOAD_OK] url=" + url);
                String realText = qiniuVoiceService.asrByUrl(url, asrFormat);
                if (realText != null && !realText.isBlank()) {
                    asrText = realText;
                }
            } catch (Exception e) {
                log.info("[VOICE][UPLOAD_FAIL] " + e.getMessage());
            }

            // 生成 AI 回复（先使用 ChatClient 简单对答，失败则使用占位回复）
            try {
                String reply = chatClient.prompt()
                        .system("你是一个友善的AI助手，请用简洁有温度的语言回答用户问题。")
                        .user(asrText)
                        .call()
                        .content();
                if (reply != null && !reply.isBlank()) {
                    llmText = reply;
                } else {
                    llmText = "抱歉，我现在有点忙，请稍后再试。";
                }
            } catch (Exception e) {
                log.info("[VOICE][LLM_FAIL] " + e.getMessage());
                if (e.getMessage() != null && e.getMessage().contains("timeout")) {
                    llmText = "抱歉，网络有点慢，请稍后再试。";
                } else if (e.getMessage() != null && e.getMessage().contains("401")) {
                    llmText = "AI服务暂时不可用，请稍后再试。";
                } else {
                    llmText = "抱歉，我现在有点忙，请稍后再试。";
                }
            }

            // 获取角色的默认音色
            String voiceType = "qiniu_zh_female_wwxkjx"; // 默认音色
            try {
                if (!convId.isEmpty()) {
                    Long conversationId = Long.parseLong(convId);
                    Conversation conversation = conversationMapper.selectById(conversationId);
                    if (conversation != null) {
                        AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                        if (role != null && role.getDefaultVoiceType() != null) {
                            voiceType = role.getDefaultVoiceType();
                            log.info("[VOICE][AUDIO_BLOB] Using role default voice: " + voiceType);
                        }
                    }
                }
            } catch (Exception e) {
                log.warn("[VOICE][AUDIO_BLOB] Failed to get role default voice: " + e.getMessage());
            }
            
            // 使用流式TTS服务合成音频
            final String finalLlmText = llmText; // 创建final变量供lambda使用
            final String finalVoiceType = voiceType; // 创建final变量供lambda使用
            streamingTTSService.processStreamingTTS(finalLlmText, voiceType, "mp3", session, session.getId(), null)
                .thenRun(() -> {
                    log.info("[VOICE][STREAMING_TTS_COMPLETED] for session: " + session.getId());
                })
                .exceptionally(throwable -> {
                    log.info("[VOICE][STREAMING_TTS_FAIL] " + throwable.getMessage());
                    
                    // 回退到传统TTS处理
                    try {
                        String b64 = qiniuVoiceService.ttsToBase64(finalLlmText, finalVoiceType, "mp3");
                        if (b64 == null || b64.isEmpty()) b64 = Base64.getEncoder().encodeToString(new byte[0]);
                        ObjectNode tts = mapper.createObjectNode();
                        tts.put("type", "tts_blob");
                        tts.put("mime", "audio/mp3");
                        tts.put("payload", b64);
                        session.sendMessage(new TextMessage(tts.toString()));
                        log.info("[VOICE][TTS_FALLBACK] -> mime=audio/mp3 sizeB64=" + b64.length());
                    } catch (Exception e) {
                        log.info("[VOICE][TTS_FALLBACK_FAIL] " + e.getMessage());
                    }
                    return null;
                });
            return;
        }

        // 处理文本消息
        if ("text_message".equals(type)) {
            String text = node.path("text").asText("");
            String convIdStr = node.path("conversationId").asText("");
            Long convIdLong = null;
            if (!convIdStr.isEmpty()) {
                try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
            }
            
            if (!text.isEmpty()) {
                // 入库用户文本消息
                try {
                    if (convIdLong != null) {
                        Conversation c = conversationMapper.selectById(convIdLong);
                        if (c != null) {
                            Message userMsg = Message.builder()
                                    .conversationId(convIdLong)
                                    .content(text)
                                    .originalContent(null)
                                    .language(c.getLastLanguage() != null ? c.getLastLanguage() : "zh")
                                    .isUserMessage(1)
                                    .emotion("neutral")
                                    .timestamp(java.time.LocalDateTime.now())
                                    .build();
                            messageMapper.insert(userMsg);
                        }
                    }
                } catch (Exception e) {
                    log.info("[VOICE][DB_FAIL_USER_TEXT] " + e.getMessage());
                }
                
                // 使用流式LLM处理文本消息（不在语音通话状态）
                try {
                    streamingLLMService.processStreamingLLM(text, session, convIdLong, false);
                    log.info("[VOICE][STREAMING_LLM_TEXT_START] -> " + text);
                } catch (Exception e) {
                    log.info("[VOICE][STREAMING_LLM_TEXT_FAIL] " + e.getMessage());
                    
                    // 回退到非流式处理
                    String llmText = "占位：AI 的回复";
                    try {
                        String reply = chatClient.prompt().messages(new UserMessage(text)).call().content();
                        if (reply != null && !reply.isBlank()) {
                            llmText = reply;
                        }
                    } catch (Exception ex) {
                        log.info("[VOICE][LLM_TEXT_FALLBACK_FAIL] " + ex.getMessage());
                    }

                    ObjectNode llm = mapper.createObjectNode();
                    llm.put("type", "llm_text");
                    llm.put("text", llmText);
                    session.sendMessage(new TextMessage(llm.toString()));
                    log.info("[VOICE][LLM_TEXT_FALLBACK] -> " + llm.toString());
                }
            }
            return;
        }

        // 处理TTS请求
        if ("tts_request".equals(type)) {
            String text = node.path("text").asText("");
            String voiceType = node.path("voiceType").asText("");
            String conversationIdStr = node.path("conversationId").asText("");
            String messageId = node.path("messageId").asText("");
            
            // 如果没有传递音色类型，尝试从角色获取默认音色
            if (voiceType.isEmpty() && !conversationIdStr.isEmpty()) {
                try {
                    Long conversationId = Long.parseLong(conversationIdStr);
                    Conversation conversation = conversationMapper.selectById(conversationId);
                    if (conversation != null) {
                        AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                        if (role != null && role.getDefaultVoiceType() != null) {
                            voiceType = role.getDefaultVoiceType();
                            log.info("[VOICE][TTS_REQUEST] Using role default voice: " + voiceType);
                        }
                    }
                } catch (Exception e) {
                    log.warn("[VOICE][TTS_REQUEST] Failed to get role default voice: " + e.getMessage());
                }
            }
            
            // 如果仍然没有音色类型，使用默认值
            if (voiceType.isEmpty()) {
                voiceType = "qiniu_zh_female_wwxkjx";
            }
            
            if (!text.isEmpty()) {
                // 使用流式TTS服务合成音频，使用前端选择的音色或角色默认音色
                String finalVoiceType = voiceType;
                streamingTTSService.processStreamingTTS(text, voiceType, "mp3", session, session.getId(), messageId)
                    .thenRun(() -> {
                        log.info("[VOICE][STREAMING_TTS_COMPLETED] for session: " + session.getId());
                    })
                    .exceptionally(throwable -> {
                        log.info("[VOICE][STREAMING_TTS_FAIL] " + throwable.getMessage());
                        
                        // 回退到传统TTS处理
                        try {
                            String b64 = qiniuVoiceService.ttsToBase64(text, finalVoiceType, "mp3");
                            if (b64 == null || b64.isEmpty()) b64 = Base64.getEncoder().encodeToString(new byte[0]);
                            ObjectNode tts = mapper.createObjectNode();
                            tts.put("type", "tts_blob");
                            tts.put("mime", "audio/mp3");
                            tts.put("payload", b64);
                            if (!messageId.isEmpty()) {
                                tts.put("messageId", messageId);
                            }
                            session.sendMessage(new TextMessage(tts.toString()));
                            log.info("[VOICE][TTS_FALLBACK] -> mime=audio/mp3 sizeB64=" + b64.length());
                        } catch (Exception e) {
                            log.info("[VOICE][TTS_FALLBACK_FAIL] " + e.getMessage());
                        }
                        return null;
                    });
            }
            return;
        }

        // 处理停止LLM请求（来自用户打断）
        if ("stop_llm".equals(type)) {
            String reason = node.path("reason").asText("user_interrupt");
            log.info("[VOICE][STOP_LLM] User interrupted LLM: " + reason);
            
            // 调用StreamingLLMService停止指定会话的LLM
            streamingLLMService.stopStreamingLLM(session.getId());
            return;
        }

        // 处理实时TTS请求（来自LLM流式生成过程中的句子边界检测）
        if ("realtime_tts_request".equals(type)) {
            String text = node.path("content").asText("");
            String voiceType = node.path("voiceType").asText("");
            if (!text.isEmpty()) {
                // 检查是否在通话状态
                if (callSessions.containsKey(session.getId())) {
                    log.info("[VOICE][REALTIME_TTS_REQUEST] In call state, processing real-time TTS for sentence: " + text);
                    
                    // 如果没有传递音色类型，尝试从角色获取默认音色
                    if (voiceType.isEmpty()) {
                        try {
                            // 从通话会话中获取conversationId
                            String callId = callSessions.get(session.getId());
                            CallInfo callInfo = activeCalls.get(callId);
                            if (callInfo != null && callInfo.conversationId != null) {
                                Conversation conversation = conversationMapper.selectById(callInfo.conversationId);
                                if (conversation != null) {
                                    AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                                    if (role != null && role.getDefaultVoiceType() != null) {
                                        voiceType = role.getDefaultVoiceType();
                                        log.info("[VOICE][REALTIME_TTS_REQUEST] Using role default voice: " + voiceType);
                                    }
                                }
                            }
                        } catch (Exception e) {
                            log.warn("[VOICE][REALTIME_TTS_REQUEST] Failed to get role default voice: " + e.getMessage());
                        }
                    }
                    
                    // 如果仍然没有音色类型，使用默认值
                    if (voiceType.isEmpty()) {
                        voiceType = "qiniu_zh_female_wwxkjx";
                    }
                    
                    // 立即调用TTS合成单个句子
                    try {
                        String b64 = qiniuVoiceService.ttsToBase64(text, voiceType, "mp3");
                        if (b64 != null && !b64.isEmpty()) {
                            ObjectNode tts = mapper.createObjectNode();
                            tts.put("type", "tts_blob");
                            tts.put("mime", "audio/mp3");
                            tts.put("payload", b64);
                            session.sendMessage(new TextMessage(tts.toString()));
                            log.info("[VOICE][REALTIME_TTS_SENT] -> mime=audio/mp3 sizeB64=" + b64.length());
                        } else {
                            log.info("[VOICE][REALTIME_TTS_EMPTY] TTS returned empty result for sentence: " + text);
                        }
                    } catch (Exception e) {
                        log.info("[VOICE][REALTIME_TTS_ERROR] " + e.getMessage());
                    }
                } else {
                    log.info("[VOICE][REALTIME_TTS_REQUEST] Not in call state, ignoring real-time TTS request");
                }
            }
            return;
        }

        // 处理自动TTS请求（来自LLM完成后的自动触发）
        if ("auto_tts_request".equals(type)) {
            String text = node.path("content").asText("");
            String voiceType = node.path("voiceType").asText("");
            if (!text.isEmpty()) {
                // 检查是否在通话状态
                if (callSessions.containsKey(session.getId())) {
                    log.info("[VOICE][AUTO_TTS_REQUEST] In call state, processing TTS for text length: " + text.length());
                    
                    // 如果没有传递音色类型，尝试从角色获取默认音色
                    if (voiceType.isEmpty()) {
                        try {
                            // 从通话会话中获取conversationId
                            String callId = callSessions.get(session.getId());
                            CallInfo callInfo = activeCalls.get(callId);
                            if (callInfo != null && callInfo.conversationId != null) {
                                Conversation conversation = conversationMapper.selectById(callInfo.conversationId);
                                if (conversation != null) {
                                    AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                                    if (role != null && role.getDefaultVoiceType() != null) {
                                        voiceType = role.getDefaultVoiceType();
                                        log.info("[VOICE][AUTO_TTS_REQUEST] Using role default voice: " + voiceType);
                                    }
                                }
                            }
                        } catch (Exception e) {
                            log.warn("[VOICE][AUTO_TTS_REQUEST] Failed to get role default voice: " + e.getMessage());
                        }
                    }
                    
                    // 如果仍然没有音色类型，使用默认值
                    if (voiceType.isEmpty()) {
                        voiceType = "qiniu_zh_female_wwxkjx";
                    }
                    
                    // 使用流式TTS服务合成音频，使用选择的音色
                    String finalVoiceType = voiceType;
                    streamingTTSService.processStreamingTTS(text, voiceType, "mp3", session, session.getId(), null)
                        .thenRun(() -> {
                            log.info("[VOICE][AUTO_TTS_COMPLETED] for session: " + session.getId());
                        })
                        .exceptionally(throwable -> {
                            log.info("[VOICE][AUTO_TTS_FAIL] " + throwable.getMessage());
                            
                            // 回退到传统TTS处理
                            try {
                                String b64 = qiniuVoiceService.ttsToBase64(text, finalVoiceType, "mp3");
                                if (b64 == null || b64.isEmpty()) b64 = Base64.getEncoder().encodeToString(new byte[0]);
                                ObjectNode tts = mapper.createObjectNode();
                                tts.put("type", "tts_blob");
                                tts.put("mime", "audio/mp3");
                                tts.put("payload", b64);
                                session.sendMessage(new TextMessage(tts.toString()));
                                log.info("[VOICE][AUTO_TTS_FALLBACK] -> mime=audio/mp3 sizeB64=" + b64.length());
                            } catch (Exception e) {
                                log.info("[VOICE][AUTO_TTS_FALLBACK_FAIL] " + e.getMessage());
                            }
                            return null;
                        });
                } else {
                    log.info("[VOICE][AUTO_TTS_REQUEST] Not in call state, ignoring auto TTS request");
                }
            }
            return;
        }

        // 支持分片：audio_start / audio_chunk / audio_end
        if ("audio_start".equals(type)) {
            String mime = node.path("mime").asText("audio/wav"); // 统一使用WAV格式
            String convId = node.path("conversationId").asText("");
            sessionAudioBuf.put(session.getId(), new StringBuilder(1024 * 64));
            log.info("[VOICE][AUDIO_START] session=" + session.getId() + ", conv=" + convId + ", mime=" + mime);
            session.sendMessage(new TextMessage("{\"type\":\"ack\",\"evt\":\"audio_start\"}"));
            return;
        }
        if ("audio_chunk".equals(type)) {
            int seq = node.path("seq").asInt(0);
            String chunk = node.path("payload").asText("");
            String mime = node.path("mime").asText("audio/wav"); // 统一使用WAV格式
            log.info("[VOICE][AUDIO_CHUNK] session=" + session.getId() + " mime=" + mime + " sizeB64=" + (chunk != null ? chunk.length() : 0));
            StringBuilder sb = sessionAudioBuf.computeIfAbsent(session.getId(), k -> new StringBuilder());
            sb.append(chunk);
            
            // 如果是PCM音频数据，直接处理
            if ("audio/pcm".equals(mime)) {
                log.info("[VOICE][PCM_CHUNK] session=" + session.getId() + " sizeB64=" + (chunk != null ? chunk.length() : 0));
                
                // 将PCM数据转换为字节数组
                try {
                    byte[] pcmBytes = java.util.Base64.getDecoder().decode(chunk);
                    
                    // 使用流式ASR服务处理PCM数据
                    streamingASRService.processStreamingASR(pcmBytes, "pcm", session, session.getId())
                        .thenAccept(result -> {
                            if (result != null && !result.trim().isEmpty()) {
                                log.info("[VOICE][PCM_ASR_RESULT] text=" + result);
                                
                                // 获取会话ID
                                String convIdStr = node.path("conversationId").asText("");
                                Long convIdLong = null;
                                if (!convIdStr.isEmpty()) {
                                    try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
                                }
                                
                                // 调用流式LLM服务
                                try {
                                    streamingLLMService.processStreamingLLM(result, session, convIdLong);
                                    log.info("[VOICE][PCM_LLM_START] -> " + result);
                                } catch (Exception e) {
                                    log.info("[VOICE][PCM_LLM_FAIL] " + e.getMessage());
                                }
                            }
                        })
                        .exceptionally(throwable -> {
                            log.info("[VOICE][PCM_ASR_FAIL] " + throwable.getMessage());
                            return null;
                        });
                        
                } catch (Exception e) {
                    log.info("[VOICE][PCM_DECODE_FAIL] " + e.getMessage());
                }
            } else if ("audio/webm".equals(mime) || "audio/webm;codecs=opus".equals(mime) || 
                      "audio/mp4".equals(mime) || "audio/mp4;codecs=opus".equals(mime) ||
                      "audio/wav".equals(mime)) {
                // 处理WebM/MP4/WAV音频数据
                log.info("[VOICE][AUDIO_CHUNK] session=" + session.getId() + " mime=" + mime + " sizeB64=" + (chunk != null ? chunk.length() : 0));
                
                try {
                    // 将音频数据转换为字节数组
                    byte[] audioBytes = java.util.Base64.getDecoder().decode(chunk);
                    
                    // 使用流式ASR服务处理音频数据
                    streamingASRService.processStreamingASR(audioBytes, mime, session, session.getId())
                        .thenAccept(result -> {
                            if (result != null && !result.trim().isEmpty()) {
                                log.info("[VOICE][AUDIO_ASR_RESULT] text=" + result);
                                
                                // 获取会话ID
                                String convIdStr = node.path("conversationId").asText("");
                                Long convIdLong = null;
                                if (!convIdStr.isEmpty()) {
                                    try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
                                }
                                
                                // 立即发送用户消息到前端显示
                                try {
                                    ObjectNode userMessage = mapper.createObjectNode();
                                    userMessage.put("type", "user_message");
                                    userMessage.put("content", result);
                                    userMessage.put("timestamp", java.time.LocalDateTime.now().toString());
                                    session.sendMessage(new TextMessage(userMessage.toString()));
                                    log.info("[VOICE][USER_MSG_SENT] -> " + result);
                                } catch (Exception e) {
                                    log.info("[VOICE][USER_MSG_SEND_FAIL] " + e.getMessage());
                                }
                                
                                // 保存用户语音消息到数据库
                                if (convIdLong != null) {
                                    try {
                                        Conversation c = conversationMapper.selectById(convIdLong);
                                        if (c != null) {
                                            Message userMsg = Message.builder()
                                                    .conversationId(convIdLong)
                                                    .content(result)
                                                    .originalContent(null)
                                                    .language(c.getLastLanguage() != null ? c.getLastLanguage() : "zh")
                                                    .isUserMessage(1)
                                                    .emotion(null)
                                                    .timestamp(java.time.LocalDateTime.now())
                                                    .build();
                                            messageMapper.insert(userMsg);
                                            log.info("[VOICE][DB_USER_MSG] Saved user message: " + result);
                                        }
                                    } catch (Exception e) {
                                        log.info("[VOICE][DB_FAIL_USER_MSG] " + e.getMessage());
                                    }
                                }
                                
                                // 调用流式LLM服务
                                try {
                                    streamingLLMService.processStreamingLLM(result, session, convIdLong);
                                    log.info("[VOICE][AUDIO_LLM_START] -> " + result);
                                } catch (Exception e) {
                                    log.info("[VOICE][AUDIO_LLM_FAIL] " + e.getMessage());
                                }
                            } else {
                                log.info("[VOICE][AUDIO_ASR_EMPTY] ASR result is empty, skipping LLM request");
                            }
                        })
                        .exceptionally(throwable -> {
                            log.info("[VOICE][AUDIO_ASR_FAIL] " + throwable.getMessage());
                            return null;
                        });
                        
                } catch (Exception e) {
                    log.info("[VOICE][AUDIO_DECODE_FAIL] " + e.getMessage());
                }
            } else {
                // 原有的分片处理逻辑
                if (seq % 10 == 0) log.info("[VOICE][AUDIO_CHUNK] session=" + session.getId() + " seq=" + seq + " sizeB64+=" + (chunk != null ? chunk.length() : 0));
            }
            return;
        }
        if ("audio_end".equals(type)) {
            String convId = node.path("conversationId").asText("");
            String mime = node.path("mime").asText("audio/wav"); // 统一使用WAV格式
            StringBuilder sb = sessionAudioBuf.remove(session.getId());
            String base64 = sb != null ? sb.toString() : "";
            log.info("[VOICE][AUDIO_END] session=" + session.getId() + ", conv=" + convId + ", totalB64=" + base64.length());

            // 先上传并尝试 ASR，拿到真实文本（失败回退占位）
            String asrText = "占位：识别到的文本";
            try {
                byte[] bytes = java.util.Base64.getDecoder().decode(base64);
                String ext = mime.contains("webm") ? "webm" : (mime.contains("ogg") ? "ogg" : "dat");
                String url = kodoStorageService.uploadBytes(bytes, ext);
                log.info("[VOICE][UPLOAD_OK] url=" + url);
                String asrFormat = ext.equals("webm") ? "ogg" : ext; // webm 基本为 opus，按 ogg 识别
                String realText = qiniuVoiceService.asrByUrl(url, asrFormat);
                log.info("[VOICE][ASR_CALL] fmt=" + asrFormat + " text=" + realText);
                if (realText != null && !realText.isBlank()) {
                    asrText = realText;
                }
            } catch (Exception e) {
                log.info("[VOICE][ASR_FAIL] " + e.getMessage());
            }

            // 回发最终 ASR 文本
            ObjectNode asr = mapper.createObjectNode();
            asr.put("type", "asr_final");
            asr.put("text", asrText);
            session.sendMessage(new TextMessage(asr.toString()));
            log.info("[VOICE][ASR_FINAL] -> " + asr.toString());

            // 入库用户语音转写为文本的消息
            try {
                String convIdStr = node.path("conversationId").asText("");
                Long convIdLong = null;
                if (!convIdStr.isEmpty()) {
                    try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
                }
                if (convIdLong != null) {
                    Conversation c = conversationMapper.selectById(convIdLong);
                    if (c != null) {
                        Message userMsg = Message.builder()
                                .conversationId(convIdLong)
                                .content(asrText)
                                .originalContent(null)
                                .language(c.getLastLanguage() != null ? c.getLastLanguage() : "zh")
                                .isUserMessage(1)
                                .emotion(null)
                                .timestamp(java.time.LocalDateTime.now())
                                .build();
                        messageMapper.insert(userMsg);
                    }
                }
            } catch (Exception e) {
                log.info("[VOICE][DB_FAIL_USER] " + e.getMessage());
            }

            // 使用流式LLM处理
            String llmText = "占位：AI 的回复";
            try {
                String convIdStr = node.path("conversationId").asText("");
                Long convIdLong = null;
                if (!convIdStr.isEmpty()) {
                    try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
                }
                
                // 构建包含角色提示词的消息列表
                List<org.springframework.ai.chat.messages.Message> messages = buildMessagesWithRolePrompt(asrText, convIdLong);
                
                // 调用流式LLM服务（在语音通话状态）
                streamingLLMService.processStreamingLLM(messages, session, convIdLong, true);
                log.info("[VOICE][STREAMING_LLM_START] -> " + asrText);
                
                // 检查是否在通话状态，如果是则自动触发TTS
                if (callSessions.containsKey(session.getId())) {
                    log.info("[VOICE][AUTO_TTS_TRIGGER] In call state, will auto-trigger TTS after LLM completion");
                    // 注意：TTS将在LLM完成后通过WebSocket消息自动触发
                } else {
                    // 非通话状态，等待前端发送tts_request
                    log.info("[VOICE][NON_CALL_STATE] Not in call, waiting for tts_request");
                }
                return;
                
            } catch (Exception e) {
                log.info("[VOICE][STREAMING_LLM_FAIL] " + e.getMessage());
                
                // 回退到非流式处理
                try {
                    String reply = chatClient.prompt().messages(new UserMessage(asrText)).call().content();
                    if (reply != null && !reply.isBlank()) {
                        llmText = reply;
                    }
                } catch (Exception ex) {
                    log.info("[VOICE][LLM_FALLBACK_FAIL] " + ex.getMessage());
                }

                ObjectNode llm = mapper.createObjectNode();
                llm.put("type", "llm_text");
                llm.put("text", llmText);
                session.sendMessage(new TextMessage(llm.toString()));
                log.info("[VOICE][LLM_FALLBACK] -> " + llm.toString());
            }

            // 入库 AI 文本消息
            try {
                String convIdStr = node.path("conversationId").asText("");
                Long convIdLong = null;
                if (!convIdStr.isEmpty()) {
                    try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
                }
                if (convIdLong != null) {
                    Conversation c = conversationMapper.selectById(convIdLong);
                    if (c != null) {
                        Message botMsg = Message.builder()
                                .conversationId(convIdLong)
                                .content(llmText)
                                .originalContent(null)
                                .language(c.getLastLanguage() != null ? c.getLastLanguage() : "zh")
                                .isUserMessage(0)
                                .emotion("neutral")
                                .timestamp(java.time.LocalDateTime.now())
                                .build();
                        messageMapper.insert(botMsg);
                    }
                }
            } catch (Exception e) {
                log.info("[VOICE][DB_FAIL_BOT] " + e.getMessage());
            }

            // 获取角色的默认音色
            String voiceType2 = "qiniu_zh_female_wwxkjx"; // 默认音色
            try {
                String convIdStr2 = node.path("conversationId").asText("");
                if (!convIdStr2.isEmpty()) {
                    Long conversationId = Long.parseLong(convIdStr2);
                    Conversation conversation = conversationMapper.selectById(conversationId);
                    if (conversation != null) {
                        AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                        if (role != null && role.getDefaultVoiceType() != null) {
                            voiceType2 = role.getDefaultVoiceType();
                            log.info("[VOICE][VOICE_MESSAGE] Using role default voice: " + voiceType2);
                        }
                    }
                }
            } catch (Exception e) {
                log.warn("[VOICE][VOICE_MESSAGE] Failed to get role default voice: " + e.getMessage());
            }
            
            // 使用流式TTS服务合成音频
            final String finalLlmText2 = llmText; // 创建final变量供lambda使用
            final String finalVoiceType2 = voiceType2; // 创建final变量供lambda使用
            streamingTTSService.processStreamingTTS(finalLlmText2, voiceType2, "mp3", session, session.getId(), null)
                .thenRun(() -> {
                    log.info("[VOICE][STREAMING_TTS_COMPLETED] for session: " + session.getId());
                })
                .exceptionally(throwable -> {
                    log.info("[VOICE][STREAMING_TTS_FAIL] " + throwable.getMessage());
                    
                    // 回退到传统TTS处理
                    try {
                        String b64 = qiniuVoiceService.ttsToBase64(finalLlmText2, finalVoiceType2, "mp3");
                        if (b64 == null || b64.isEmpty()) b64 = Base64.getEncoder().encodeToString(new byte[0]);
                        ObjectNode tts = mapper.createObjectNode();
                        tts.put("type", "tts_blob");
                        tts.put("mime", "audio/mp3");
                        tts.put("payload", b64);
                        session.sendMessage(new TextMessage(tts.toString()));
                        log.info("[VOICE][TTS_FALLBACK] -> mime=audio/mp3 sizeB64=" + b64.length());
                    } catch (Exception e) {
                        log.info("[VOICE][TTS_FALLBACK_FAIL] " + e.getMessage());
                    }
                    return null;
                });

            // 入库 TTS 音频消息（保存音频 URL 和元信息）
            try {
                String convIdStr = node.path("conversationId").asText("");
                Long convIdLong = null;
                if (!convIdStr.isEmpty()) {
                    try { convIdLong = Long.parseLong(convIdStr); } catch (Exception ignore) {}
                }
                if (convIdLong != null) {
                    Conversation c = conversationMapper.selectById(convIdLong);
                    if (c != null) {
                        // 将 TTS 音频上传到 Kodo 保存
                        String b64 = qiniuVoiceService.ttsToBase64(llmText, "qiniu_zh_female_wwxkjx", "mp3");
                        if (b64 == null || b64.isEmpty()) b64 = Base64.getEncoder().encodeToString(new byte[0]);
                        byte[] audioBytes = Base64.getDecoder().decode(b64);
                        String audioUrl = kodoStorageService.uploadBytes(audioBytes, "mp3");
                        
                        // 计算音频时长（粗略估算：mp3 128kbps 约 1KB/秒）
                        int audioSizeKB = audioBytes.length / 1024;
                        int estimatedDurationMs = audioSizeKB * 1000 / 128; // 假设 128kbps
                        String audioInfo = String.format("{\"type\":\"tts\",\"url\":\"%s\",\"format\":\"mp3\",\"sizeKB\":%d,\"durationMs\":%d}", audioUrl, audioSizeKB, estimatedDurationMs);
                        
                        Message ttsMsg = Message.builder()
                                .conversationId(convIdLong)
                                .content("[语音回复]")
                                .originalContent(audioInfo)
                                .language(c.getLastLanguage() != null ? c.getLastLanguage() : "zh")
                                .isUserMessage(0)
                                .emotion("neutral")
                                .timestamp(java.time.LocalDateTime.now())
                                .build();
                        messageMapper.insert(ttsMsg);
                        log.info("[VOICE][DB_TTS] url=" + audioUrl + " audioSizeKB=" + audioSizeKB + " durationMs=" + estimatedDurationMs);
                    }
                }
            } catch (Exception e) {
                log.info("[VOICE][DB_FAIL_TTS] " + e.getMessage());
            }
            return;
        }

        // 预留：流式 audio_start/audio_chunk/audio_end 协议
        log.info("[VOICE][ERROR] unknown type: " + type);
        session.sendMessage(new TextMessage("{\"type\":\"error\",\"message\":\"unknown type\"}"));
    }

    @Override
    public void afterConnectionClosed(WebSocketSession session, CloseStatus status) {
        // Tear down any active call this session participated in.
        String callId = callSessions.remove(session.getId());
        if (callId != null) {
            CallInfo callInfo = activeCalls.remove(callId);
            if (callInfo != null) {
                // Notify the other participant(s), best effort.
                notifyPeerDisconnected(callInfo.callerSession, session, "caller");
                notifyPeerDisconnected(callInfo.calleeSession, session, "callee");
                // Also drop the peer's session -> call mapping; the original left
                // it pointing at the already-removed call (stale entry).
                if (callInfo.callerSession != null) {
                    callSessions.remove(callInfo.callerSession.getId(), callId);
                }
                if (callInfo.calleeSession != null) {
                    callSessions.remove(callInfo.calleeSession.getId(), callId);
                }
            }
        }

        sessions.remove(session.getId());
        sessionAudioBuf.remove(session.getId());

        // Release per-session state held by the streaming services.
        streamingASRService.cleanupSession(session.getId());
        streamingTTSService.cleanupSession(session.getId());
        streamingLLMService.cleanupSession(session.getId());

        log.info("[VOICE][CLOSE] session=" + session.getId() + " code=" + status);
    }

    /**
     * Notifies {@code peer} that the other participant disconnected, unless the
     * peer is the closing session itself or is absent (AI-side has no session).
     * Send failures are logged and never propagated out of the close callback.
     */
    private void notifyPeerDisconnected(WebSocketSession peer, WebSocketSession closing, String label) {
        if (peer == null || peer.getId().equals(closing.getId())) {
            return;
        }
        try {
            peer.sendMessage(new TextMessage("{\"type\":\"call_ended\",\"reason\":\"peer_disconnected\"}"));
        } catch (Exception e) {
            log.error("[VOICE][ERROR] Failed to notify " + label + ": " + e.getMessage());
        }
    }
    
    // Per-call state shared between the two participating sessions.
    private static class CallInfo {
        String callId;                  // unique identifier of this call
        String callerId;                // caller's WebSocket session id (set from session.getId())
        String calleeId;                // callee identifier supplied by the caller
        Long conversationId;            // conversation this call belongs to
        WebSocketSession callerSession; // caller's live WebSocket session
        WebSocketSession calleeSession; // callee's session; stays null for user-to-AI calls
        CallState state;                // current lifecycle state (RINGING/CONNECTED/ENDED)
        long startTime;                 // creation time, epoch milliseconds
        
        // A new call starts in RINGING state with only the caller's session attached.
        CallInfo(String callId, String callerId, String calleeId, Long conversationId, WebSocketSession callerSession) {
            this.callId = callId;
            this.callerId = callerId;
            this.calleeId = calleeId;
            this.conversationId = conversationId;
            this.callerSession = callerSession;
            this.state = CallState.RINGING;
            this.startTime = System.currentTimeMillis();
        }
    }
    
    // Lifecycle states of a voice call.
    private enum CallState {
        RINGING,    // call started, waiting for the callee to answer
        CONNECTED,  // both parties connected
        ENDED       // call finished
    }
    
    /**
     * Routes call-signaling messages to the matching handler.
     *
     * @return {@code true} when the message was consumed as call signaling,
     *         {@code false} when the caller should continue normal processing
     */
    private boolean handleCallSignaling(WebSocketSession session, JsonNode node) throws Exception {
        String msgType = node.path("type").asText("");

        if ("call_start".equals(msgType)) {
            return handleCallStart(session, node);
        }
        if ("call_answer".equals(msgType)) {
            return handleCallAnswer(session, node);
        }
        if ("call_reject".equals(msgType)) {
            return handleCallReject(session, node);
        }
        if ("call_hangup".equals(msgType)) {
            return handleCallHangup(session, node);
        }
        if ("ice_candidate".equals(msgType)) {
            return handleIceCandidate(session, node);
        }
        if ("offer".equals(msgType)) {
            return handleOffer(session, node);
        }
        if ("answer".equals(msgType)) {
            return handleAnswer(session, node);
        }
        // Not a signaling message; let the other handlers process it.
        return false;
    }
    
    /**
     * Handles an incoming "call_start" request: validates parameters, registers
     * the call, auto-answers on behalf of the AI role and pushes the role's
     * opening line (if configured).
     *
     * @return always {@code true}: the message is consumed as call signaling
     */
    private boolean handleCallStart(WebSocketSession session, JsonNode node) throws Exception {
        String callId = node.path("callId").asText("");
        String calleeId = node.path("calleeId").asText("");
        Long conversationId = node.path("conversationId").asLong(0L);

        if (callId.isEmpty() || calleeId.isEmpty() || conversationId == 0L) {
            session.sendMessage(new TextMessage("{\"type\":\"call_error\",\"message\":\"Missing required parameters\"}"));
            return true;
        }

        // Reject duplicate call ids so an existing call cannot be replaced.
        if (activeCalls.containsKey(callId)) {
            session.sendMessage(new TextMessage("{\"type\":\"call_error\",\"message\":\"Call already exists\"}"));
            return true;
        }

        // Register the call (starts in RINGING state).
        CallInfo callInfo = new CallInfo(callId, session.getId(), calleeId, conversationId, session);
        activeCalls.put(callId, callInfo);
        callSessions.put(session.getId(), callId);

        log.info("[VOICE][CALL_START] callId=" + callId + ", caller=" + session.getId() + ", callee=" + calleeId);

        // The AI role answers automatically: report the call as connected right away.
        session.sendMessage(new TextMessage("{\"type\":\"call_connected\",\"callId\":\"" + callId + "\"}"));

        sendOpeningLine(session, conversationId);
        return true;
    }

    /**
     * Sends the role's opening line (text message plus optional TTS audio) for a
     * freshly connected call and persists it as an AI message. Best effort: any
     * failure is logged and swallowed so the call itself is unaffected.
     */
    private void sendOpeningLine(WebSocketSession session, Long conversationId) {
        try {
            Conversation conversation = conversationMapper.selectById(conversationId);
            if (conversation == null) {
                return;
            }
            AppRole role = appRoleMapper.selectById(conversation.getRoleId());
            if (role == null || role.getOpeningLine() == null || role.getOpeningLine().trim().isEmpty()) {
                return;
            }

            // Fill template variables. All replacements are null-safe: the
            // original passed role.getName() unchecked, and a null name made
            // String.replace throw, silently skipping the whole opening line.
            String openingLine = role.getOpeningLine();
            openingLine = openingLine.replace("{role_name}", role.getName() != null ? role.getName() : "");
            openingLine = openingLine.replace("{personality}", role.getPersonality() != null ? role.getPersonality() : "");
            openingLine = openingLine.replace("{background}", role.getBackground() != null ? role.getBackground() : "");

            // Push the opening line to the client as a regular AI chat message.
            ObjectNode openingMessage = mapper.createObjectNode();
            openingMessage.put("type", "ai_message");
            openingMessage.put("content", openingLine);
            openingMessage.put("isUserMessage", false);
            openingMessage.put("timestamp", System.currentTimeMillis());
            session.sendMessage(new TextMessage(openingMessage.toString()));

            // Persist the opening line so it appears in conversation history.
            Message openingMsg = Message.builder()
                    .conversationId(conversationId)
                    .content(openingLine)
                    .originalContent(null)
                    .language(conversation.getLastLanguage() != null ? conversation.getLastLanguage() : "zh")
                    .isUserMessage(0)
                    .emotion("friendly")
                    .timestamp(java.time.LocalDateTime.now())
                    .build();
            messageMapper.insert(openingMsg);

            // Speak the opening line when the role has a default voice configured.
            String voiceType = role.getDefaultVoiceType();
            if (voiceType != null && !voiceType.trim().isEmpty()) {
                streamingTTSService.processStreamingTTS(openingLine, voiceType, "mp3", session, session.getId(), null)
                        .thenRun(() -> log.info("[VOICE][OPENING_LINE_TTS_COMPLETED] for session: " + session.getId()))
                        .exceptionally(throwable -> {
                            log.info("[VOICE][OPENING_LINE_TTS_FAIL] " + throwable.getMessage());
                            return null;
                        });
            }

            log.info("[VOICE][OPENING_LINE] Sent opening line: " + openingLine);
        } catch (Exception e) {
            log.warn("[VOICE][OPENING_LINE_FAIL] " + e.getMessage());
        }
    }
    
    /**
     * Handles "call_answer" from the callee: moves a RINGING call to CONNECTED,
     * attaches the callee's session and notifies both parties (forwarding the
     * SDP answer to the caller when one was supplied).
     */
    private boolean handleCallAnswer(WebSocketSession session, JsonNode node) throws Exception {
        String callId = node.path("callId").asText("");
        JsonNode answerNode = node.path("answer");

        CallInfo callInfo = activeCalls.get(callId);
        if (callInfo == null) {
            session.sendMessage(new TextMessage("{\"type\":\"call_error\",\"message\":\"Call not found\"}"));
            return true;
        }
        if (callInfo.state != CallState.RINGING) {
            session.sendMessage(new TextMessage("{\"type\":\"call_error\",\"message\":\"Call not in ringing state\"}"));
            return true;
        }

        // Transition the call to CONNECTED and register the callee's session.
        callInfo.state = CallState.CONNECTED;
        callInfo.calleeSession = session;
        callSessions.put(session.getId(), callId);

        log.info("[VOICE][CALL_ANSWER] callId=" + callId + ", callee=" + session.getId());

        // The caller receives the SDP answer attached when one was provided;
        // the callee always gets the plain connected notification.
        String callerMessage = answerNode.isMissingNode()
                ? "{\"type\":\"call_connected\",\"callId\":\"" + callId + "\"}"
                : String.format("{\"type\":\"call_connected\",\"callId\":\"%s\",\"answer\":%s}", callId, answerNode.toString());
        callInfo.callerSession.sendMessage(new TextMessage(callerMessage));
        session.sendMessage(new TextMessage("{\"type\":\"call_connected\",\"callId\":\"" + callId + "\"}"));

        return true;
    }
    
    /**
     * Handles "call_reject": tells the caller the call was rejected and removes
     * all call bookkeeping. Unknown call ids are silently ignored.
     */
    private boolean handleCallReject(WebSocketSession session, JsonNode node) throws Exception {
        String callId = node.path("callId").asText("");

        CallInfo rejected = activeCalls.get(callId);
        if (rejected == null) {
            return true;
        }

        log.info("[VOICE][CALL_REJECT] callId=" + callId + ", callee=" + session.getId());

        // Let the caller know the callee declined.
        rejected.callerSession.sendMessage(new TextMessage("{\"type\":\"call_rejected\",\"callId\":\"" + callId + "\"}"));

        // Drop every trace of the call.
        activeCalls.remove(callId);
        callSessions.remove(rejected.callerSession.getId());
        callSessions.remove(session.getId());

        return true;
    }
    
    /**
     * Handles "call_hangup": notifies the other party (best effort) and removes
     * the call. Cleanup always runs even when the notification fails — the
     * original let a send error propagate before the cleanup lines, leaking the
     * entry in {@code activeCalls}/{@code callSessions}.
     */
    private boolean handleCallHangup(WebSocketSession session, JsonNode node) throws Exception {
        String callId = node.path("callId").asText("");

        CallInfo callInfo = activeCalls.get(callId);
        if (callInfo == null) {
            return true;
        }

        log.info("[VOICE][CALL_HANGUP] callId=" + callId + ", session=" + session.getId());

        // Tell the other participant the call ended (null for AI calls).
        WebSocketSession otherSession = callInfo.callerSession.getId().equals(session.getId())
                ? callInfo.calleeSession : callInfo.callerSession;
        if (otherSession != null && otherSession.isOpen()) {
            try {
                otherSession.sendMessage(new TextMessage("{\"type\":\"call_ended\",\"callId\":\"" + callId + "\",\"reason\":\"hangup\"}"));
            } catch (Exception e) {
                log.warn("[VOICE][CALL_HANGUP] Failed to notify peer: " + e.getMessage());
            }
        }

        // Remove all bookkeeping for this call.
        activeCalls.remove(callId);
        callSessions.remove(callInfo.callerSession.getId());
        if (callInfo.calleeSession != null) {
            callSessions.remove(callInfo.calleeSession.getId());
        }

        return true;
    }
    
    // Handles "ice_candidate": relays the candidate to the peer in a
    // user-to-user call; user-to-AI calls need no WebRTC signaling.
    private boolean handleIceCandidate(WebSocketSession session, JsonNode node) throws Exception {
        return forwardSignaling(session, node, "ICE_CANDIDATE", "ICE candidate");
    }

    // Handles "offer": relays the SDP offer to the peer in a user-to-user call.
    private boolean handleOffer(WebSocketSession session, JsonNode node) throws Exception {
        return forwardSignaling(session, node, "OFFER", "offer");
    }

    // Handles "answer": relays the SDP answer to the peer in a user-to-user call.
    private boolean handleAnswer(WebSocketSession session, JsonNode node) throws Exception {
        return forwardSignaling(session, node, "ANSWER", "answer");
    }

    /**
     * Shared relay for WebRTC signaling payloads — the original three handlers
     * were identical copy-paste apart from their log labels. Resolves the call
     * for {@code session} (explicit callId, falling back to the session's
     * registered call) and, when the call has a human callee, forwards the raw
     * message verbatim to the other side. Calls against the AI have no peer
     * WebSocket session, so nothing is forwarded.
     *
     * @param tag  log tag, e.g. "OFFER"
     * @param noun human-readable payload name used in log messages
     * @return always {@code true}: the message is consumed either way
     */
    private boolean forwardSignaling(WebSocketSession session, JsonNode node, String tag, String noun) throws Exception {
        String callId = node.path("callId").asText("");
        if (callId.isEmpty()) {
            // Fall back to the call this session is already registered in.
            callId = callSessions.get(session.getId());
        }

        if (callId == null) {
            log.info("[VOICE][" + tag + "] No call ID found for session: " + session.getId());
            return true;
        }

        CallInfo callInfo = activeCalls.get(callId);
        if (callInfo == null) {
            log.info("[VOICE][" + tag + "] Call not found: " + callId);
            return true;
        }

        if (callInfo.calleeSession == null) {
            // User-to-AI call: the AI is simulated and has no real WebSocket
            // session, so WebRTC signaling has nowhere to go.
            log.info("[VOICE][" + tag + "] AI call, no need to forward " + noun + ": " + callId);
            return true;
        }

        // User-to-user call: relay the message verbatim to the other side.
        WebSocketSession otherSession = callInfo.callerSession.getId().equals(session.getId())
                ? callInfo.calleeSession : callInfo.callerSession;
        if (otherSession != null && otherSession.isOpen()) {
            otherSession.sendMessage(new TextMessage(node.toString()));
            log.info("[VOICE][" + tag + "] Forwarded " + noun + " for call: " + callId);
        } else {
            log.info("[VOICE][" + tag + "] Other session not available for call: " + callId);
        }

        return true;
    }
    

    
    /**
     * Builds the system-prompt persona text for a role: name, background,
     * personality, language style and prompt template (each only when present),
     * followed by fixed instructions to stay in character and reply in first
     * person. Returns the empty string for a null role.
     */
    private String buildPersona(AppRole role) {
        if (role == null) {
            return "";
        }
        StringBuilder persona = new StringBuilder();
        appendPersonaLine(persona, "你正在扮演角色：", role.getName(), "。\n");
        appendPersonaLine(persona, "背景：", role.getBackground(), "\n");
        appendPersonaLine(persona, "性格：", role.getPersonality(), "\n");
        appendPersonaLine(persona, "语言风格：", role.getLanguageStyle(), "\n");
        appendPersonaLine(persona, "提示：", role.getPromptTemplate(), "\n");
        persona.append("请以该角色的语气与用户对话，保持人设一致，回答简洁有温度。");
        persona.append("请使用第一人称（我、我的）来回复，不要使用第三人称。");
        return persona.toString();
    }

    // Appends "prefix + value + suffix" only when value has visible text.
    private static void appendPersonaLine(StringBuilder sb, String prefix, String value, String suffix) {
        if (StringUtils.hasText(value)) {
            sb.append(prefix).append(value).append(suffix);
        }
    }
    
    /**
     * Builds the LLM message list for one user turn: an optional system message
     * carrying the role persona, up to the 20 most recent history messages in
     * chronological order, and finally the current user message. Falls back to
     * just the user message on any error.
     *
     * @param userMessage    the current user utterance (always appended last)
     * @param conversationId conversation to load persona/history from; may be null
     */
    private List<org.springframework.ai.chat.messages.Message> buildMessagesWithRolePrompt(String userMessage, Long conversationId) {
        List<org.springframework.ai.chat.messages.Message> messages = new ArrayList<>();

        try {
            if (conversationId != null) {
                Conversation conversation = conversationMapper.selectById(conversationId);
                if (conversation != null && conversation.getRoleId() != null) {
                    // Prepend the full role persona as a system message.
                    AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                    if (role != null) {
                        String persona = buildPersona(role);
                        if (StringUtils.hasText(persona)) {
                            messages.add(new SystemMessage(persona));
                            log.info("[VOICE][ROLE_PERSONA] Added complete role persona for role: {}", role.getName());
                        }
                    }

                    // Fetch the 20 MOST RECENT messages. The original ordered
                    // ascending with "limit 20", which returned the 20 OLDEST
                    // messages of a long conversation instead of the latest.
                    List<com.qny.ai.entity.Message> history = new ArrayList<>(messageMapper.selectList(
                        new com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper<com.qny.ai.entity.Message>()
                            .eq(com.qny.ai.entity.Message::getConversationId, conversationId)
                            .orderByDesc(com.qny.ai.entity.Message::getTimestamp)
                            .last("limit 20")
                    ));
                    // Restore chronological order for the LLM context window.
                    java.util.Collections.reverse(history);

                    // Map persisted rows onto Spring AI user/assistant messages.
                    for (com.qny.ai.entity.Message historyMsg : history) {
                        boolean fromUser = historyMsg.getIsUserMessage() != null && historyMsg.getIsUserMessage() == 1;
                        messages.add(fromUser
                                ? new UserMessage(historyMsg.getContent())
                                : new AssistantMessage(historyMsg.getContent()));
                    }

                    log.info("[VOICE][CONTEXT] Added {} history messages for context", history.size());
                }
            }

            // The current user turn always goes last.
            messages.add(new UserMessage(userMessage));

        } catch (Exception e) {
            log.error("[VOICE][ROLE_PROMPT_ERROR] Failed to build messages with role prompt: ", e);
            // On failure still hand the LLM at least the user's message.
            messages.add(new UserMessage(userMessage));
        }

        return messages;
    }
}


