package com.qny.ai.service.impl;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.qny.ai.service.StreamingASRService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;

import java.io.ByteArrayOutputStream;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Streaming ASR (automatic speech recognition) service backed by Qiniu.
 *
 * <p>Complete audio clips are uploaded to Kodo object storage and the resulting
 * URL is submitted to the Qiniu ASR API; lifecycle events ({@code asr_start},
 * {@code asr_final}, {@code asr_error}) are pushed to the client as JSON text
 * messages over the WebSocket session.
 *
 * <p>Per-session state lives in {@link ConcurrentHashMap}s; chunks for a single
 * session are assumed to arrive sequentially (single WebSocket reader thread).
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class StreamingASRServiceImpl implements StreamingASRService {

    /**
     * All audio is uploaded and submitted to ASR as WAV, regardless of the
     * client-reported MIME type: Qiniu ASR supports WAV natively and it is the
     * most stable path. (The previous per-format branching assigned identical
     * values on every branch, so collapsing it preserves behavior.)
     */
    private static final String AUDIO_FORMAT = "wav";

    private final QiniuVoiceService qiniuVoiceService;
    private final KodoStorageService kodoStorageService;
    private final ObjectMapper objectMapper = new ObjectMapper();

    // Raw audio bytes accumulated per session until the final chunk arrives.
    // NOTE: bytes are buffered directly; the previous implementation
    // concatenated per-chunk Base64 strings, which is not decodable when an
    // intermediate chunk's encoding ends in '=' padding.
    private final Map<String, ByteArrayOutputStream> sessionAudioBuffers = new ConcurrentHashMap<>();
    // Last ASR result per session; currently only cleared in cleanupSession().
    private final Map<String, AtomicReference<String>> sessionASRResults = new ConcurrentHashMap<>();

    /**
     * Uploads a complete audio clip and runs ASR on it asynchronously.
     *
     * <p>NOTE(review): this performs blocking I/O (upload + HTTP ASR call) on
     * {@code ForkJoinPool.commonPool()}; consider passing a dedicated executor
     * to {@code supplyAsync} if ASR traffic grows.
     *
     * @param audioData raw audio bytes of the full clip
     * @param format    client-reported format/MIME type (logged only; audio is
     *                  always handled as WAV — see {@link #AUDIO_FORMAT})
     * @param session   WebSocket session to push ASR events to
     * @param sessionId logical session identifier, used for logging
     * @return future completing with the recognized text, or {@code ""} when
     *         recognition failed (errors are reported to the client, never thrown)
     */
    @Override
    public CompletableFuture<String> processStreamingASR(byte[] audioData, String format, WebSocketSession session, String sessionId) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                log.info("[STREAMING_ASR] Processing audio data for session: {} (reported format: {})", sessionId, format);

                // Tell the client recognition has started.
                sendASREvent(session, "asr_start", "开始语音识别...");

                String url = kodoStorageService.uploadBytes(audioData, AUDIO_FORMAT);
                log.info("[STREAMING_ASR] Audio uploaded to: {} (format: {})", url, AUDIO_FORMAT);

                log.info("[STREAMING_ASR] Calling ASR with format: {}, url: {}", AUDIO_FORMAT, url);
                String asrResult = qiniuVoiceService.asrByUrl(url, AUDIO_FORMAT);

                // Single retry on an empty result (no rate limiting by design).
                if (isBlank(asrResult)) {
                    log.info("[STREAMING_ASR] First attempt failed, retrying with format: {}", AUDIO_FORMAT);
                    asrResult = qiniuVoiceService.asrByUrl(url, AUDIO_FORMAT);
                    if (isBlank(asrResult)) {
                        log.warn("[STREAMING_ASR] FAILED on retry with format: {}", AUDIO_FORMAT);
                    } else {
                        log.info("[STREAMING_ASR] SUCCESS on retry with format: {}", AUDIO_FORMAT);
                    }
                }

                if (isBlank(asrResult)) {
                    sendASREvent(session, "asr_error", "语音识别失败");
                    log.warn("[STREAMING_ASR] ASR returned empty result");
                    return "";
                }

                sendASREvent(session, "asr_final", asrResult);
                log.info("[STREAMING_ASR] ASR result: {}", asrResult);
                return asrResult;

            } catch (Exception e) {
                log.error("[STREAMING_ASR] Failed to process ASR: ", e);
                sendASREvent(session, "asr_error", "语音识别时发生错误: " + e.getMessage());
                return "";
            }
        });
    }

    /**
     * Buffers one audio chunk for a session; when {@code isFinal} is set, the
     * accumulated clip is handed off for asynchronous processing and the
     * session buffer is released.
     *
     * <p>Fix: chunks are now appended as raw bytes. The previous code encoded
     * each chunk to Base64 and concatenated the strings, which
     * {@link java.util.Base64.Decoder#decode(String)} rejects whenever a
     * non-final chunk's length is not a multiple of 3 (mid-stream padding).
     *
     * @param audioChunk raw bytes of this chunk
     * @param sessionId  logical session identifier keying the buffer
     * @param isFinal    {@code true} when this is the last chunk of the clip
     */
    @Override
    public void processAudioChunk(byte[] audioChunk, String sessionId, boolean isFinal) {
        try {
            ByteArrayOutputStream buffer =
                    sessionAudioBuffers.computeIfAbsent(sessionId, k -> new ByteArrayOutputStream());
            // write(byte[], int, int) does not declare IOException, unlike write(byte[]).
            buffer.write(audioChunk, 0, audioChunk.length);

            log.debug("[STREAMING_ASR] Processed audio chunk for session: {}, size: {}, isFinal: {}",
                     sessionId, audioChunk.length, isFinal);

            if (isFinal) {
                byte[] fullAudioData = buffer.toByteArray();

                // Process the complete clip off the WebSocket thread.
                CompletableFuture.runAsync(() -> {
                    try {
                        // TODO: wire the full clip into the streaming ASR
                        // pipeline (e.g. processStreamingASR); currently only
                        // logged, matching the previous placeholder behavior.
                        log.info("[STREAMING_ASR] Processing final audio chunk for session: {}, total bytes: {}",
                                 sessionId, fullAudioData.length);

                        // Release the session buffer.
                        sessionAudioBuffers.remove(sessionId);

                    } catch (Exception e) {
                        log.error("[STREAMING_ASR] Failed to process final audio chunk: ", e);
                    }
                });
            }

        } catch (Exception e) {
            log.error("[STREAMING_ASR] Failed to process audio chunk: ", e);
        }
    }

    /**
     * Drops all buffered state for a session (audio buffer and last result).
     * Safe to call for unknown or already-cleaned session ids.
     */
    @Override
    public void cleanupSession(String sessionId) {
        sessionAudioBuffers.remove(sessionId);
        sessionASRResults.remove(sessionId);
        log.info("[STREAMING_ASR] Cleaned up session: {}", sessionId);
    }

    /** @return {@code true} when the ASR result is null, empty, or whitespace-only. */
    private static boolean isBlank(String s) {
        return s == null || s.trim().isEmpty();
    }

    /**
     * Sends a JSON event ({@code type}, {@code content}, {@code timestamp}) to
     * the client. Send failures are logged, never propagated, so ASR processing
     * is not aborted by a dropped connection.
     */
    private void sendASREvent(WebSocketSession session, String type, String content) {
        try {
            // Guard: sendMessage on a closed session throws; skip quietly instead.
            if (session == null || !session.isOpen()) {
                log.debug("[STREAMING_ASR] Session closed, dropping event: {}", type);
                return;
            }
            ObjectNode message = objectMapper.createObjectNode();
            message.put("type", type);
            message.put("content", content);
            message.put("timestamp", System.currentTimeMillis());

            session.sendMessage(new TextMessage(message.toString()));
            log.debug("[STREAMING_ASR] Sent event: {} - {}", type, content);

        } catch (Exception e) {
            log.error("[STREAMING_ASR] Failed to send ASR event: ", e);
        }
    }
}
