package com.qny.ai.service.impl;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.qny.ai.entity.AppRole;
import com.qny.ai.entity.Conversation;
import com.qny.ai.mapper.AppRoleMapper;
import com.qny.ai.mapper.ConversationMapper;
import com.qny.ai.mapper.MessageMapper;
import com.qny.ai.service.StreamingLLMService;
import com.qny.ai.service.impl.KodoStorageService;
import com.qny.ai.service.impl.QiniuVoiceService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.messages.Message;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.stereotype.Service;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.UnaryOperator;
import java.util.regex.Pattern;

@Slf4j
@Service
@RequiredArgsConstructor
public class StreamingLLMServiceImpl implements StreamingLLMService {
    
    private final ChatClient streamingChatClient;
    private final ConversationMapper conversationMapper;
    private final MessageMapper messageMapper;
    private final AppRoleMapper appRoleMapper;
    private final QiniuVoiceService qiniuVoiceService;
    private final KodoStorageService kodoStorageService;
    private final ObjectMapper objectMapper = new ObjectMapper();
    
    // 存储每个会话的完整文本
    private final Map<String, StringBuilder> sessionTextBuffers = new ConcurrentHashMap<>();
    // 存储每个会话的conversationId
    private final Map<String, Long> sessionConversationIds = new ConcurrentHashMap<>();
    // 会话已处理的句子边界位置，用于避免重复处理
    private final Map<String, Integer> sessionProcessedPositions = new ConcurrentHashMap<>();
    // 存储每个会话的停止标志
    private final Map<String, Boolean> sessionStopFlags = new ConcurrentHashMap<>();
    // WebSocket发送同步锁
    
    // 句子结束标点符号模式
    private static final Pattern SENTENCE_END_PATTERN = Pattern.compile("[。！？.!?]");
    private final Map<String, Object> sessionLocks = new ConcurrentHashMap<>();
    
    /**
     * 停止指定会话的LLM流式生成
     */
    public void stopStreamingLLM(String sessionId) {
        log.info("[STREAMING_LLM] Stopping LLM for session: {}", sessionId);
        sessionStopFlags.put(sessionId, true);
    }
    
    /**
     * 检查会话是否应该停止
     */
    private boolean shouldStop(String sessionId) {
        return sessionStopFlags.getOrDefault(sessionId, false);
    }
    

    @Override
    public void processStreamingLLM(List<Message> messages, WebSocketSession session, Long conversationId) {
        // 默认不在语音通话状态
        processStreamingLLM(messages, session, conversationId, false);
    }
    
    @Override
    public void processStreamingLLM(List<Message> messages, WebSocketSession session, Long conversationId, boolean isInVoiceCall) {
        CompletableFuture.runAsync(() -> {
            try {
                log.info("[STREAMING_LLM] Starting streaming LLM for conversation: {}, isInVoiceCall: {}", conversationId, isInVoiceCall);
                
                // 发送开始信号
                sendStreamingEvent(session, "llm_start", "开始生成回复...");
                
                // 初始化文本缓冲区和会话信息
                String sessionId = session.getId();
                StringBuilder textBuffer = sessionTextBuffers.computeIfAbsent(sessionId, k -> new StringBuilder());
                textBuffer.setLength(0); // 清空缓冲区
                sessionConversationIds.put(sessionId, conversationId); // 保存conversationId
                sessionStopFlags.put(sessionId, false); // 初始化停止标志
                
                // 使用流式ChatClient
                streamingChatClient.prompt()
                    .messages(messages)
                    .stream()
                    .chatResponse()
                    .subscribe(
                        response -> {
                            // 检查是否应该停止
                            if (shouldStop(sessionId)) {
                                log.info("[STREAMING_LLM] LLM stopped by user interrupt for session: {}", sessionId);
                                return;
                            }
                            
                            // 处理每个流式响应块
                            String content = response.getResult().getOutput().getText();
                            if (content != null && !content.trim().isEmpty()) {
                                textBuffer.append(content); // 累积文本
                                sendStreamingEvent(session, "llm_chunk", content);
                                log.debug("[STREAMING_LLM] Sent chunk: {}", content);
                                
                                // 只有在语音通话状态时才进行实时句子检测和TTS
                                if (isInVoiceCall) {
                                    checkAndProcessVoiceCallSentences(session, textBuffer.toString());
                                }
                            }
                        },
                        error -> {
                            log.error("[STREAMING_LLM] Streaming error: ", error);
                            sendStreamingEvent(session, "llm_error", "生成回复时发生错误: " + error.getMessage());
                        },
                        () -> {
                            // 流式完成
                            String fullText = textBuffer.toString();
                            sendStreamingEvent(session, "llm_end", fullText); // 发送完整文本
                            
                            // 保存AI回复到数据库
                            saveBotMessageToDatabase(sessionId, fullText, session);
                            
                            // 清理已处理位置
                            sessionProcessedPositions.remove(sessionId);
                            
                            // 只有在语音通话状态时才发送自动TTS请求信号
                            if (isInVoiceCall) {
                                // 获取角色的默认音色
                                String voiceType = "qiniu_zh_female_wwxkjx"; // 默认音色
                                try {
                                    Long convId = sessionConversationIds.get(sessionId);
                                    if (convId != null) {
                                        Conversation conversation = conversationMapper.selectById(convId);
                                        if (conversation != null && conversation.getRoleId() != null) {
                                            AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                                            if (role != null && role.getDefaultVoiceType() != null) {
                                                voiceType = role.getDefaultVoiceType();
                                                log.info("[STREAMING_LLM] Using role default voice for auto TTS: {}", voiceType);
                                            }
                                        }
                                    }
                                } catch (Exception e) {
                                    log.warn("[STREAMING_LLM] Failed to get role default voice for auto TTS: {}", e.getMessage());
                                }
                                
                                // 注释掉auto_tts_request，只使用realtime_tts_request
                                // log.info("[STREAMING_LLM] Sending auto TTS request signal for text length: {}", fullText.length());
                                // sendStreamingEvent(session, "auto_tts_request", fullText, voiceType);
                            }
                            
                            log.info("[STREAMING_LLM] Streaming completed for conversation: {}, text length: {}, isInVoiceCall: {}", 
                                    conversationId, fullText.length(), isInVoiceCall);
                        }
                    );
                    
            } catch (Exception e) {
                log.error("[STREAMING_LLM] Failed to process streaming LLM: ", e);
                sendStreamingEvent(session, "llm_error", "处理请求时发生错误: " + e.getMessage());
            }
        });
    }
    
    @Override
    public void processStreamingLLM(String userMessage, WebSocketSession session, Long conversationId) {
        // 默认不在语音通话状态
        processStreamingLLM(userMessage, session, conversationId, false);
    }
    
    @Override
    public void processStreamingLLM(String userMessage, WebSocketSession session, Long conversationId, boolean isInVoiceCall) {
        CompletableFuture.runAsync(() -> {
            try {
                log.info("[STREAMING_LLM] Starting streaming LLM for message: {}, isInVoiceCall: {}", userMessage, isInVoiceCall);
                
                // 发送开始信号
                sendStreamingEvent(session, "llm_start", "开始生成回复...");
                
                // 初始化文本缓冲区和会话信息
                String sessionId = session.getId();
                StringBuilder textBuffer = sessionTextBuffers.computeIfAbsent(sessionId, k -> new StringBuilder());
                textBuffer.setLength(0); // 清空缓冲区
                sessionConversationIds.put(sessionId, conversationId); // 保存conversationId
                sessionStopFlags.put(sessionId, false); // 初始化停止标志
                
                // 使用流式ChatClient处理单个消息
                streamingChatClient.prompt()
                    .user(userMessage)
                    .stream()
                    .chatResponse()
                    .subscribe(
                        response -> {
                            // 检查是否应该停止
                            if (shouldStop(sessionId)) {
                                log.info("[STREAMING_LLM] LLM stopped by user interrupt for session: {}", sessionId);
                                return;
                            }
                            
                            // 处理每个流式响应块
                            String content = response.getResult().getOutput().getText();
                            if (content != null && !content.trim().isEmpty()) {
                                textBuffer.append(content); // 累积文本
                                sendStreamingEvent(session, "llm_chunk", content);
                                log.debug("[STREAMING_LLM] Sent chunk: {}", content);
                                
                                // 只有在语音通话状态时才进行实时句子检测和TTS
                                if (isInVoiceCall) {
                                    checkAndProcessVoiceCallSentences(session, textBuffer.toString());
                                }
                            }
                        },
                        error -> {
                            log.error("[STREAMING_LLM] Streaming error: ", error);
                            sendStreamingEvent(session, "llm_error", "生成回复时发生错误: " + error.getMessage());
                        },
                        () -> {
                            // 流式完成
                            String fullText = textBuffer.toString();
                            sendStreamingEvent(session, "llm_end", fullText); // 发送完整文本
                            
                            // 保存AI回复到数据库
                            saveBotMessageToDatabase(sessionId, fullText, session);
                            
                            // 清理已处理位置
                            sessionProcessedPositions.remove(sessionId);
                            
                            // 只有在语音通话状态时才发送自动TTS请求信号
                            if (isInVoiceCall) {
                                // 获取角色的默认音色
                                String voiceType = "qiniu_zh_female_wwxkjx"; // 默认音色
                                try {
                                    Long convId = sessionConversationIds.get(sessionId);
                                    if (convId != null) {
                                        Conversation conversation = conversationMapper.selectById(convId);
                                        if (conversation != null && conversation.getRoleId() != null) {
                                            AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                                            if (role != null && role.getDefaultVoiceType() != null) {
                                                voiceType = role.getDefaultVoiceType();
                                                log.info("[STREAMING_LLM] Using role default voice for auto TTS: {}", voiceType);
                                            }
                                        }
                                    }
                                } catch (Exception e) {
                                    log.warn("[STREAMING_LLM] Failed to get role default voice for auto TTS: {}", e.getMessage());
                                }
                                
                                // 注释掉auto_tts_request，只使用realtime_tts_request
                                // log.info("[STREAMING_LLM] Sending auto TTS request signal for text length: {}", fullText.length());
                                // sendStreamingEvent(session, "auto_tts_request", fullText, voiceType);
                            }
                            
                            log.info("[STREAMING_LLM] Streaming completed for message: {}, text length: {}, isInVoiceCall: {}", 
                                    userMessage, fullText.length(), isInVoiceCall);
                        }
                    );
                    
            } catch (Exception e) {
                log.error("[STREAMING_LLM] Failed to process streaming LLM: ", e);
                sendStreamingEvent(session, "llm_error", "处理请求时发生错误: " + e.getMessage());
            }
        });
    }
    
    @Override
    public String getSessionText(String sessionId) {
        StringBuilder buffer = sessionTextBuffers.get(sessionId);
        return buffer != null ? buffer.toString() : "";
    }
    
    @Override
    public void cleanupSession(String sessionId) {
        sessionTextBuffers.remove(sessionId);
        sessionConversationIds.remove(sessionId);
        sessionProcessedPositions.remove(sessionId);
        sessionStopFlags.remove(sessionId);
        sessionLocks.remove(sessionId);
        log.info("[STREAMING_LLM] Cleaned up session: {}", sessionId);
    }
    
    /**
     * 保存AI回复消息到数据库
     */
    private void saveBotMessageToDatabase(String sessionId, String content, WebSocketSession session) {
        try {
            Long conversationId = sessionConversationIds.get(sessionId);
            if (conversationId != null && !content.trim().isEmpty()) {
                Conversation c = conversationMapper.selectById(conversationId);
                if (c != null) {
                    com.qny.ai.entity.Message botMsg = com.qny.ai.entity.Message.builder()
                            .conversationId(conversationId)
                            .content(content)
                            .originalContent(null)
                            .language(c.getLastLanguage() != null ? c.getLastLanguage() : "zh")
                            .isUserMessage(0)
                            .emotion("neutral")
                            .audioUrl(null) // 初始为null，异步TTS完成后更新
                            .timestamp(java.time.LocalDateTime.now())
                            .build();
                    messageMapper.insert(botMsg);
                    log.info("[STREAMING_LLM] Saved bot message to database: conversationId={}, content length={}", conversationId, content.length());
                    
                    // 异步处理TTS并上传到Kodo
                    processAsyncTTS(botMsg.getId(), content, session);
                }
            }
        } catch (Exception e) {
            log.error("[STREAMING_LLM] Failed to save bot message to database: ", e);
        }
    }
    
    /**
     * 异步处理TTS并上传到Kodo
     */
    private void processAsyncTTS(Long messageId, String content, WebSocketSession session) {
        CompletableFuture.runAsync(() -> {
            try {
                log.info("[STREAMING_LLM] Starting async TTS for messageId: {}, content length: {}", messageId, content.length());
                
                // 调用TTS服务
                String base64Audio = qiniuVoiceService.ttsToBase64(content, "qiniu_zh_female_wwxkjx", "mp3");
                
                if (base64Audio != null && !base64Audio.trim().isEmpty()) {
                    // 上传到Kodo
                    String audioUrl = kodoStorageService.uploadBase64Audio(base64Audio, "mp3");
                    
                    // 更新消息记录，添加音频URL
                    com.qny.ai.entity.Message updateMsg = com.qny.ai.entity.Message.builder()
                            .id(messageId)
                            .audioUrl(audioUrl)
                            .build();
                    messageMapper.updateById(updateMsg);
                    
                    // 通知前端更新消息的audioUrl
                    notifyAudioUrlUpdate(messageId, audioUrl, session);
                    
                    log.info("[STREAMING_LLM] Async TTS completed for messageId: {}, audioUrl: {}", messageId, audioUrl);
                } else {
                    log.warn("[STREAMING_LLM] TTS returned empty result for messageId: {}", messageId);
                }
                
            } catch (Exception e) {
                log.error("[STREAMING_LLM] Failed to process async TTS for messageId: {}: ", messageId, e);
            }
        });
    }
    
    /**
     * 通知前端更新消息的audioUrl
     */
    private void notifyAudioUrlUpdate(Long messageId, String audioUrl, WebSocketSession session) {
        try {
            if (session != null && session.isOpen()) {
                ObjectNode message = objectMapper.createObjectNode();
                message.put("type", "audio_url_update");
                message.put("messageId", messageId);
                message.put("audioUrl", audioUrl);
                message.put("timestamp", System.currentTimeMillis());
                
                session.sendMessage(new TextMessage(message.toString()));
                log.info("[STREAMING_LLM] Notified frontend of audioUrl update: messageId={}, audioUrl={}", messageId, audioUrl);
            }
        } catch (Exception e) {
            log.error("[STREAMING_LLM] Failed to notify frontend of audioUrl update: ", e);
        }
    }
    
    private void sendStreamingEvent(WebSocketSession session, String type, String content) {
        sendStreamingEvent(session, type, content, null);
    }
    
    private void sendStreamingEvent(WebSocketSession session, String type, String content, String voiceType) {
        String sessionId = session.getId();
        Object lock = sessionLocks.computeIfAbsent(sessionId, k -> new Object());
        
        synchronized (lock) {
            try {
                // 检查WebSocket会话是否仍然打开
                if (!session.isOpen()) {
                    log.warn("[STREAMING_LLM] WebSocket session is closed, skipping event: {}", type);
                    return;
                }
                
                ObjectNode message = objectMapper.createObjectNode();
                message.put("type", type);
                message.put("content", content);
                message.put("timestamp", System.currentTimeMillis());
                if (voiceType != null) {
                    message.put("voiceType", voiceType);
                }
                
                session.sendMessage(new TextMessage(message.toString()));
                log.debug("[STREAMING_LLM] Sent event: {} - {}", type, content);
                
            } catch (Exception e) {
                log.error("[STREAMING_LLM] Failed to send streaming event: ", e);
            }
        }
    }
    
    /**
     * 检查并处理语音通话中的句子边界，实现实时TTS
     */
    private void checkAndProcessVoiceCallSentences(WebSocketSession session, String currentText) {
        try {
            String sessionId = session.getId();
            int lastProcessedPosition = sessionProcessedPositions.getOrDefault(sessionId, 0);
            
            // 检查是否有新的句子结束
            if (currentText.length() > lastProcessedPosition) {
                String newText = currentText.substring(lastProcessedPosition);
                
                // 查找句子结束位置
                int sentenceEndIndex = findSentenceEnd(newText);
                if (sentenceEndIndex > 0) {
                    // 提取完整句子
                    String sentence = newText.substring(0, sentenceEndIndex + 1).trim();
                    // 检查句子是否有效（不是纯标点符号，至少3个字符）
                    if (isValidSentence(sentence)) {
                        // 更新已处理位置
                        sessionProcessedPositions.put(sessionId, lastProcessedPosition + sentenceEndIndex + 1);
                        
                        // 获取角色的默认音色
                        String voiceType = "qiniu_zh_female_wwxkjx"; // 默认音色
                        try {
                            Long conversationId = sessionConversationIds.get(sessionId);
                            if (conversationId != null) {
                                Conversation conversation = conversationMapper.selectById(conversationId);
                                if (conversation != null && conversation.getRoleId() != null) {
                                    AppRole role = appRoleMapper.selectById(conversation.getRoleId());
                                    if (role != null && role.getDefaultVoiceType() != null) {
                                        voiceType = role.getDefaultVoiceType();
                                        log.info("[STREAMING_LLM] Using role default voice for real-time TTS: {}", voiceType);
                                    }
                                }
                            }
                        } catch (Exception e) {
                            log.warn("[STREAMING_LLM] Failed to get role default voice for real-time TTS: {}", e.getMessage());
                        }
                        
                        // 发送实时TTS请求
                        log.info("[STREAMING_LLM] Detected sentence boundary, sending real-time TTS request: {}", sentence);
                        sendStreamingEvent(session, "realtime_tts_request", sentence, voiceType);
                    } else {
                        // 句子无效（纯标点符号或太短），更新位置但不发送TTS
                        sessionProcessedPositions.put(sessionId, lastProcessedPosition + sentenceEndIndex + 1);
                        log.debug("[STREAMING_LLM] Invalid sentence, skipping TTS: {}", sentence);
                    }
                }
            }
        } catch (Exception e) {
            log.error("[STREAMING_LLM] Failed to process voice call sentences: ", e);
        }
    }
    
    /**
     * 查找句子结束位置
     * 优化后的句子检测，支持更多标点符号和更智能的检测
     */
    private int findSentenceEnd(String text) {
        for (int i = 0; i < text.length(); i++) {
            char c = text.charAt(i);
            // 中文标点符号
            if (c == '。' || c == '！' || c == '？' || c == '；' || c == '：') {
                return i;
            }
            // 英文标点符号
            if (c == '.' || c == '!' || c == '?' || c == ';' || c == ':') {
                return i;
            }
            // 换行符也作为句子结束
            if (c == '\n' || c == '\r') {
                return i;
            }
        }
        return -1;
    }
    
    /**
     * 检查句子是否有效（不是纯标点符号，有实际内容）
     */
    private boolean isValidSentence(String sentence) {
        if (sentence == null || sentence.trim().isEmpty()) {
            return false;
        }
        
        // 移除首尾空白字符
        String trimmed = sentence.trim();
        
        // 检查长度（至少3个字符）
        if (trimmed.length() < 3) {
            return false;
        }
        
        // 检查是否只包含标点符号和空白字符
        boolean hasContent = false;
        for (int i = 0; i < trimmed.length(); i++) {
            char c = trimmed.charAt(i);
            // 如果是字母、数字、中文字符，则认为有内容
            if (Character.isLetterOrDigit(c) || isChineseCharacter(c)) {
                hasContent = true;
                break;
            }
        }
        
        return hasContent;
    }
    
    /**
     * 检查是否为中文字符
     */
    private boolean isChineseCharacter(char c) {
        return (c >= 0x4E00 && c <= 0x9FFF) || // CJK统一汉字
               (c >= 0x3400 && c <= 0x4DBF) || // CJK扩展A
               (c >= 0x20000 && c <= 0x2A6DF) || // CJK扩展B
               (c >= 0x2A700 && c <= 0x2B73F) || // CJK扩展C
               (c >= 0x2B740 && c <= 0x2B81F) || // CJK扩展D
               (c >= 0x2B820 && c <= 0x2CEAF) || // CJK扩展E
               (c >= 0xF900 && c <= 0xFAFF) || // CJK兼容汉字
               (c >= 0x2F800 && c <= 0x2FA1F); // CJK兼容汉字补充
    }
}



