package com.och.mrcp.handler;

import com.och.mrcp.config.EngineConfig;
import com.och.mrcp.core.audio.AudioCodec;
import com.och.mrcp.core.mrcp.model.MrcpRequest;
import com.och.mrcp.core.rtp.session.RtpSession;
import com.och.mrcp.model.RecognitionResult;
import com.och.mrcp.service.MediaSessionManager;
import com.och.mrcp.service.UnifiedVoiceSessionManager;
import com.och.mrcp.service.VoiceSessionService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Component;
import jakarta.annotation.PreDestroy;

import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Unified voice-recognition session handler.
 *
 * <p>Combines RTP audio routing and MRCP session management, and drives the
 * complete lifecycle of a recognition session: RECOGNIZE start-up, audio
 * forwarding (with pre-start buffering while the ASR engine warms up), a
 * no-audio watchdog that prevents cloud-side IDLE_TIMEOUT, and STOP /
 * termination cleanup.</p>
 */
@Slf4j
@Component
public class VoiceRecognitionSessionHandler {

    @Lazy
    @Autowired
    private UnifiedVoiceSessionManager sessionManager;

    @Lazy
    @Autowired
    private MediaSessionManager mediaSessionManager;

    // Active session contexts, keyed by Call-ID (also used as the MRCP session id).
    private final ConcurrentHashMap<String, SessionContext> activeSessions = new ConcurrentHashMap<>();

    // Pre-start buffer cap (~1 second of 8 kHz PCM: 320 B / 20 ms frame * 100 frames ≈ 32 KB).
    private static final int MAX_PRESTART_BUFFER_BYTES = 32 * 1024;

    // ===== No-audio watchdog state =====
    // Per-session watchdog timers; entries are removed via cancelNoAudioWatchdog()
    // on every terminal path (final result, error, STOP, termination).
    private final ConcurrentHashMap<String, ScheduledFuture<?>> noAudioTimers = new ConcurrentHashMap<>();
    // A single daemon thread suffices: watchdog bodies only chain async calls.
    private final ScheduledThreadPoolExecutor watchdogScheduler = new ScheduledThreadPoolExecutor(1, r -> {
        Thread t = new Thread(r, "no-audio-watchdog");
        t.setDaemon(true);
        return t;
    });

    /**
     * Per-session bookkeeping: activity/completion flags, timing, the
     * engine-ready future, the pre-start audio buffer, and watchdog parameters.
     */
    private static class SessionContext {
        private final String sessionId;
        private final AtomicBoolean isActive = new AtomicBoolean(false);
        private final AtomicBoolean isCompleted = new AtomicBoolean(false);
        private RecognitionResult lastResult;
        private long startTime;
        private long endTime;
        // Completed once the ASR engine has started and can accept audio.
        private CompletableFuture<Void> engineReadyFuture;
        // Audio buffered while the engine is not yet ready. Must be a concurrent
        // queue: the RTP thread offers frames while the engine-ready callback
        // thread drains them.
        private final Queue<byte[]> preStartAudioBuffer = new ConcurrentLinkedQueue<>();
        private final AtomicLong preStartBufferedBytes = new AtomicLong(0);
        // No-audio watchdog parameters (timeout and the result future to complete
        // with an empty result if the watchdog fires).
        private long noAudioTimeoutMs;
        private CompletableFuture<RecognitionResult> resultFutureRef;

        public SessionContext(String sessionId) {
            this.sessionId = sessionId;
            this.startTime = System.currentTimeMillis();
        }

        public void markCompleted() {
            isCompleted.set(true);
            endTime = System.currentTimeMillis();
        }

        /** Elapsed time; uses "now" as the end point while the session is still running. */
        public long getDuration() {
            return (endTime > 0 ? endTime : System.currentTimeMillis()) - startTime;
        }
    }

    /**
     * Handle RTP audio data (merged from the former VoiceRecognitionHandler)
     * and route it to the matching active recognition session.
     *
     * @param rtpSession  the RTP session the packet arrived on
     * @param audioData   raw audio payload
     * @param payloadType RTP payload type (used to pick the PCM decoder)
     */
    public void processAudioData(RtpSession rtpSession, byte[] audioData, int payloadType) {
        String rtpSessionId = rtpSession.getSessionId();

        // Validate the audio payload.
        if (audioData == null) {
            log.warn("VoiceRecognitionSessionHandler: Received null audio data for RTP session: {}", rtpSessionId);
            return;
        }
        if (audioData.length == 0) {
            log.debug("VoiceRecognitionSessionHandler: Received empty audio data for RTP session: {}", rtpSessionId);
            return;
        }

        log.debug("Processing audio data for RTP session: {}, size: {} bytes, payload type: {} ({})",
                rtpSessionId, audioData.length, payloadType, AudioCodec.getPayloadTypeInfo(payloadType));

        try {
            // Resolve the Call-ID that owns this RTP session.
            String callId = mediaSessionManager.getCallIdByRtpSessionId(rtpSessionId);
            if (callId == null) {
                log.debug("No Call-ID found for RTP session: {}, audio data ignored", rtpSessionId);
                log.debug("Current active sessions: {}", activeSessions.keySet());
                return;
            }

            // Hot path (fires every ~20 ms per stream) — keep at debug level.
            log.debug("RTP session {} mapped to Call-ID: {}", rtpSessionId, callId);
            log.debug("Current active sessions: {}", activeSessions.keySet());

            // Only forward audio when a recognition session is active for this Call-ID.
            if (isSessionActive(callId)) {
                // Convert to PCM before feeding the engine.
                byte[] pcmAudioData = convertAudioToPcm(audioData, payloadType);
                if (pcmAudioData != null) {
                    // Buffer while the engine is not ready yet, to avoid dropping frames.
                    SessionContext ctx = activeSessions.get(callId);
                    if (ctx != null && ctx.engineReadyFuture != null && !ctx.engineReadyFuture.isDone()) {
                        bufferPreStartAudio(ctx, callId, pcmAudioData);
                    } else {
                        // Engine ready: forward the converted PCM directly.
                        handleAudioData(callId, pcmAudioData);
                    }
                    log.debug("Audio data converted and sent to active recognition session: {} (RTP: {}), original: {} bytes, PCM: {} bytes",
                            callId, rtpSessionId, audioData.length, pcmAudioData.length);
                } else {
                    log.warn("Failed to convert audio data for session: {} (RTP: {}), payload type: {}",
                            callId, rtpSessionId, payloadType);
                }
            } else {
                // No active recognition session — normal before the MRCP RECOGNIZE request.
                log.debug("No active recognition session for Call-ID: {} (RTP: {}), audio data ignored",
                        callId, rtpSessionId);
            }

        } catch (Exception e) {
            log.error("Error processing audio data for RTP session {}: {}", rtpSessionId, e.getMessage(), e);
        }
    }

    /**
     * Backward-compatibility overload without a payload type; assumes PCMA (8).
     */
    public void processAudioData(RtpSession rtpSession, byte[] audioData) {
        log.debug("Processing audio data without payload type, assuming PCMA (8)");
        processAudioData(rtpSession, audioData, 8); // default to PCMA
    }

    /**
     * Convert an audio frame to PCM.
     *
     * @param audioData   raw audio payload
     * @param payloadType RTP payload type
     * @return PCM data (or the original bytes when the type needs no conversion
     *         or is unsupported); {@code null} when conversion throws
     */
    private byte[] convertAudioToPcm(byte[] audioData, int payloadType) {
        try {
            // Unsupported types are passed through unchanged rather than dropped.
            if (!AudioCodec.isSupported(payloadType)) {
                log.warn("Unsupported payload type for conversion: {} ({}), passing through original data",
                        payloadType, AudioCodec.getPayloadTypeInfo(payloadType));
                return audioData;
            }

            byte[] pcmData = AudioCodec.convertToPcm(payloadType, audioData);

            // AudioCodec returns the same reference when no conversion was needed.
            if (pcmData != audioData) {
                log.debug("Audio converted from {} to PCM: {} bytes -> {} bytes",
                        AudioCodec.getPayloadTypeInfo(payloadType), audioData.length, pcmData.length);
            } else {
                log.debug("Audio data passed through without conversion for payload type: {}",
                        AudioCodec.getPayloadTypeInfo(payloadType));
            }

            return pcmData;

        } catch (Exception e) {
            log.error("Error converting audio data from payload type {} to PCM: {}", payloadType, e.getMessage(), e);
            return null;
        }
    }

    /**
     * Handle an MRCP RECOGNIZE request — the entry point for recognition.
     *
     * @param sessionId   MRCP session id (Call-ID)
     * @param config      recognition configuration (may be null; defaults apply)
     * @param mrcpRequest originating MRCP request
     * @return future completed with the final recognition result, or
     *         exceptionally on engine/session failure
     */
    public CompletableFuture<RecognitionResult> handleRecognizeRequest(
            String sessionId,
            EngineConfig.RecognitionConfig config,
            MrcpRequest mrcpRequest) {

        log.info("Handling RECOGNIZE request for session: {}", sessionId);

        CompletableFuture<RecognitionResult> resultFuture = new CompletableFuture<>();

        // Create the session context and mark it active immediately (do not
        // wait for the async engine start-up) so incoming audio gets buffered.
        SessionContext context = new SessionContext(sessionId);
        context.isActive.set(true);
        activeSessions.put(sessionId, context);

        log.info("Recognition session created and marked as active: {}", sessionId);

        // Session callback wiring engine events back to the MRCP layer.
        VoiceSessionService.SessionCallback callback = new VoiceSessionService.SessionCallback() {
            @Override
            public void onIntermediateResult(String sessionId, RecognitionResult result) {
                log.debug("Intermediate result for MRCP session {}: {}", sessionId, result.getText());
                // An MRCP in-progress event could be emitted here.
                // sendMrcpEvent(sessionId, "RECOGNITION-IN-PROGRESS", result);
            }

            @Override
            public void onFinalResult(String sessionId, RecognitionResult result) {
                log.info("Final result for MRCP session {}: {}", sessionId, result.getText());

                // The session is done — its watchdog must not fire any more.
                cancelNoAudioWatchdog(sessionId);

                SessionContext ctx = activeSessions.get(sessionId);
                if (ctx != null) {
                    ctx.lastResult = result;
                    // Stop and destroy the underlying ASR session promptly so the
                    // cloud side does not linger in StartTranscription and hit
                    // IDLE_TIMEOUT.
                    try {
                        sessionManager.stopRecognitionSession(sessionId)
                            .exceptionally(t -> { log.warn("Stop session failed for {}: {}", sessionId, t.getMessage()); return null; })
                            .thenCompose(v -> sessionManager.destroyRecognitionSession(sessionId))
                            .exceptionally(t -> { log.warn("Destroy session failed for {}: {}", sessionId, t.getMessage()); return null; })
                            .thenRun(ctx::markCompleted);
                    } catch (Exception e) {
                        log.warn("Error stopping/destroying session {} on final result: {}", sessionId, e.getMessage());
                        ctx.markCompleted();
                    }
                }

                resultFuture.complete(result);

                // An MRCP completion event could be emitted here.
                // sendMrcpEvent(sessionId, "RECOGNITION-COMPLETE", result);
            }

            @Override
            public void onError(String sessionId, Exception error) {
                log.error("Error in MRCP session {}: {}", sessionId, error.getMessage());

                // Terminal path — cancel the watchdog before tearing down.
                cancelNoAudioWatchdog(sessionId);

                SessionContext ctx = activeSessions.get(sessionId);
                if (ctx != null) {
                    try {
                        sessionManager.stopRecognitionSession(sessionId)
                            .exceptionally(t -> { log.warn("Stop session failed for {}: {}", sessionId, t.getMessage()); return null; })
                            .thenCompose(v -> sessionManager.destroyRecognitionSession(sessionId))
                            .exceptionally(t -> { log.warn("Destroy session failed for {}: {}", sessionId, t.getMessage()); return null; })
                            .thenRun(ctx::markCompleted);
                    } catch (Exception e) {
                        log.warn("Error stopping/destroying session {} on error: {}", sessionId, e.getMessage());
                        ctx.markCompleted();
                    }
                }

                resultFuture.completeExceptionally(error);

                // An MRCP failure event could be emitted here.
                // sendMrcpEvent(sessionId, "RECOGNITION-FAILED", error);
            }

            @Override
            public void onStateChanged(String sessionId, VoiceSessionService.SessionState oldState, VoiceSessionService.SessionState newState) {
                log.debug("MRCP session {} state changed: {} -> {}", sessionId, oldState, newState);

                SessionContext ctx = activeSessions.get(sessionId);
                if (ctx != null) {
                    if (newState == VoiceSessionService.SessionState.ACTIVE) {
                        ctx.isActive.set(true);
                    } else if (newState == VoiceSessionService.SessionState.STOPPED ||
                              newState == VoiceSessionService.SessionState.ERROR) {
                        ctx.isActive.set(false);
                    }
                }
            }
        };

        // Pre-check the media session and only warn (RTP is started at the MRCP
        // processing layer; the no-audio watchdog covers the gap).
        try {
            com.och.mrcp.service.MediaSessionManager.MediaSession media = mediaSessionManager.getMediaSession(sessionId);
            if (media == null || !media.isConfirmed() || !media.isRtpActive()) {
                log.warn("Media session not ready for {} (confirmed={}, rtpActive={}), recognition will start with no-audio watchdog.",
                        sessionId, media != null && media.isConfirmed(), media != null && media.isRtpActive());
            }
        } catch (Exception ignored) {
            // Best-effort pre-check only; failures here must not block recognition.
        }

        // Create and start the recognition session.
        CompletableFuture<Void> engineReadyFuture = new CompletableFuture<>();
        context.engineReadyFuture = engineReadyFuture;
        context.resultFutureRef = resultFuture;
        // Floor of 500 ms; default 1000 ms when no config is supplied.
        context.noAudioTimeoutMs = Math.max(500, config != null ? config.getSilenceTimeout() : 1000);

        sessionManager.createRecognitionSession(sessionId, config, callback, null, mrcpRequest)
            .thenCompose(v -> sessionManager.startRecognitionSession(sessionId))
            .thenRun(() -> {
                // Engine started — mark ready, then replay buffered audio.
                log.info("Engine ready for session: {}", sessionId);
                engineReadyFuture.complete(null);
                flushPreStartAudioBuffer(sessionId);
            })
            .exceptionally(throwable -> {
                log.error("Failed to create/start recognition session: {}", sessionId, throwable);
                resultFuture.completeExceptionally(throwable);
                engineReadyFuture.completeExceptionally(throwable);
                // The watchdog scheduled below must not fire for a dead session.
                cancelNoAudioWatchdog(sessionId);
                activeSessions.remove(sessionId);
                return null;
            });

        // No-audio watchdog: if no audio arrives within T ms, end the session
        // proactively to avoid a cloud-side IDLE_TIMEOUT.
        scheduleNoAudioWatchdog(sessionId, context.noAudioTimeoutMs, resultFuture);

        return resultFuture;
    }


    /**
     * Wait for the engine to become ready.
     *
     * @return a future that completes when the engine can accept audio, or a
     *         failed future when the session/future is unknown
     */
    public CompletableFuture<Void> waitForEngineReady(String sessionId) {
        SessionContext context = activeSessions.get(sessionId);
        if (context == null) {
            log.warn("No session context found for session: {}", sessionId);
            return CompletableFuture.failedFuture(new IllegalStateException("Session not found: " + sessionId));
        }

        if (context.engineReadyFuture == null) {
            log.warn("No engine ready future found for session: {}", sessionId);
            return CompletableFuture.failedFuture(new IllegalStateException("Engine ready future not found: " + sessionId));
        }

        return context.engineReadyFuture;
    }

    /**
     * Forward PCM audio (from the RTP stream) to the recognition session.
     * Called by the RTP processing layer.
     */
    public void handleAudioData(String sessionId, byte[] audioData) {
        SessionContext context = activeSessions.get(sessionId);
        if (context == null || !context.isActive.get()) {
            log.debug("Ignoring audio data for inactive session: {}", sessionId);
            return;
        }
        // Drop audio once the underlying session is STOPPING/STOPPED to avoid
        // cloud-side errors.
        try {
            VoiceSessionService.SessionState st = sessionManager.getSessionState(sessionId);
            if (st == VoiceSessionService.SessionState.STOPPING || st == VoiceSessionService.SessionState.STOPPED) {
                log.debug("Dropping audio for {} because session state is {}", sessionId, st);
                return;
            }
        } catch (Exception ignored) {
            // State lookup is best-effort; fall through and attempt the send.
        }

        // Receiving audio counts as input — push the no-audio deadline out.
        resetNoAudioWatchdog(sessionId);

        sessionManager.sendAudioData(sessionId, audioData)
            .exceptionally(throwable -> {
                log.warn("Failed to send audio data for session: {}", sessionId, throwable);
                return null;
            });
    }

    // ===== No-audio watchdog implementation =====

    /**
     * Schedule (or reschedule) the no-audio watchdog: after {@code timeoutMs}
     * without audio it stops and destroys the session and completes the result
     * future with an empty final result.
     */
    private void scheduleNoAudioWatchdog(String sessionId, long timeoutMs, CompletableFuture<RecognitionResult> resultFuture) {
        try {
            cancelNoAudioWatchdog(sessionId);
            ScheduledFuture<?> future = watchdogScheduler.schedule(() -> {
                try {
                    SessionContext ctx = activeSessions.get(sessionId);
                    if (ctx == null || !ctx.isActive.get() || ctx.isCompleted.get()) {
                        return; // session already ended or inactive
                    }
                    log.warn("No audio received within {}ms for session {}, stopping to avoid cloud IDLE_TIMEOUT", timeoutMs, sessionId);
                    // Proactively stop and destroy to avoid cloud-side idle timeout.
                    sessionManager.stopRecognitionSession(sessionId)
                        .exceptionally(t -> { log.debug("Stop on no-audio failed for {}: {}", sessionId, t.getMessage()); return null; })
                        .thenCompose(v -> sessionManager.destroyRecognitionSession(sessionId))
                        .exceptionally(t -> { log.debug("Destroy on no-audio failed for {}: {}", sessionId, t.getMessage()); return null; })
                        .thenRun(() -> {
                            ctx.markCompleted();
                            // Complete with an empty result so callers do not wait forever.
                            RecognitionResult empty = new RecognitionResult();
                            empty.setText("");
                            empty.setConfidence(0.0);
                            empty.setFinal(true);
                            resultFuture.complete(empty);
                        });
                } catch (Exception e) {
                    log.debug("No-audio watchdog error for {}: {}", sessionId, e.getMessage());
                }
            }, timeoutMs, TimeUnit.MILLISECONDS);
            noAudioTimers.put(sessionId, future);
        } catch (Exception e) {
            log.debug("Failed to schedule no-audio watchdog for {}: {}", sessionId, e.getMessage());
        }
    }

    /** Restart the watchdog with the session's configured timeout (called on each audio frame). */
    private void resetNoAudioWatchdog(String sessionId) {
        try {
            ScheduledFuture<?> future = noAudioTimers.get(sessionId);
            if (future != null && !future.isDone()) {
                // Reschedule: cancel the pending task and re-arm with the same threshold.
                cancelNoAudioWatchdog(sessionId);
                SessionContext ctx = activeSessions.get(sessionId);
                if (ctx != null) {
                    scheduleNoAudioWatchdog(sessionId, ctx.noAudioTimeoutMs, ctx.resultFutureRef);
                }
            }
        } catch (Exception ignored) {
            // Watchdog rescheduling is best-effort.
        }
    }

    /** Cancel and forget the watchdog timer for a session (safe if none exists). */
    private void cancelNoAudioWatchdog(String sessionId) {
        try {
            ScheduledFuture<?> future = noAudioTimers.remove(sessionId);
            if (future != null) {
                future.cancel(false);
            }
        } catch (Exception ignored) {
            // Cancellation is best-effort.
        }
    }

    /**
     * Buffer audio while the engine is not ready, preventing frame loss between
     * the IN-PROGRESS response and the engine reaching ACTIVE. When the cap is
     * exceeded, the oldest frames are evicted first.
     *
     * <p>NOTE(review): the capacity check is check-then-act and may slightly
     * overshoot the cap under concurrent calls; the queue itself is thread-safe.</p>
     */
    private void bufferPreStartAudio(SessionContext context, String sessionId, byte[] pcmAudioData) {
        try {
            long current = context.preStartBufferedBytes.get();
            if (current + pcmAudioData.length > MAX_PRESTART_BUFFER_BYTES) {
                // Over capacity: evict oldest frames until the new frame fits.
                while (current + pcmAudioData.length > MAX_PRESTART_BUFFER_BYTES && !context.preStartAudioBuffer.isEmpty()) {
                    byte[] removed = context.preStartAudioBuffer.poll();
                    if (removed != null) {
                        current = context.preStartBufferedBytes.addAndGet(-removed.length);
                    }
                }
                if (current + pcmAudioData.length > MAX_PRESTART_BUFFER_BYTES) {
                    log.debug("Pre-start buffer full for session {}, dropping incoming frame of {} bytes", sessionId, pcmAudioData.length);
                    return;
                }
            }
            context.preStartAudioBuffer.offer(pcmAudioData);
            context.preStartBufferedBytes.addAndGet(pcmAudioData.length);
        } catch (Exception e) {
            log.warn("Error buffering pre-start audio for session {}: {}", sessionId, e.getMessage());
        }
    }

    /**
     * Replay buffered pre-start audio once the engine is ready. Decrements the
     * byte counter per frame (instead of zeroing it) so a frame buffered
     * concurrently during the drain is not silently uncounted.
     */
    private void flushPreStartAudioBuffer(String sessionId) {
        SessionContext context = activeSessions.get(sessionId);
        if (context == null) {
            return;
        }
        try {
            int flushed = 0;
            long bytes = 0;
            byte[] frame;
            while ((frame = context.preStartAudioBuffer.poll()) != null) {
                context.preStartBufferedBytes.addAndGet(-frame.length);
                if (frame.length > 0) {
                    bytes += frame.length;
                    handleAudioData(sessionId, frame);
                    flushed++;
                }
            }
            if (flushed > 0) {
                log.info("Flushed {} pre-start audio frames ({} bytes) for session {} after engine ready", flushed, bytes, sessionId);
            }
        } catch (Exception e) {
            log.warn("Error flushing pre-start audio for session {}: {}", sessionId, e.getMessage());
        }
    }

    /**
     * Handle an MRCP STOP request: deactivate the context, cancel the watchdog,
     * and stop the underlying recognition session.
     */
    public CompletableFuture<Void> handleStopRequest(String sessionId) {
        log.info("Handling STOP request for session: {}", sessionId);

        cancelNoAudioWatchdog(sessionId);

        SessionContext context = activeSessions.get(sessionId);
        if (context != null) {
            context.isActive.set(false);
        }

        return sessionManager.stopRecognitionSession(sessionId);
    }

    /**
     * Handle session termination (from SIP BYE or MRCP connection loss):
     * cancel the watchdog, drop the context, and destroy the recognition session.
     */
    public CompletableFuture<Void> handleSessionTerminationInternal(String sessionId) {
        log.info("Handling session termination for: {}", sessionId);

        // The watchdog must not fire for a terminated session.
        cancelNoAudioWatchdog(sessionId);

        SessionContext context = activeSessions.remove(sessionId);
        if (context != null) {
            context.markCompleted();
            log.info("Session {} terminated after {}ms", sessionId, context.getDuration());
        }

        return sessionManager.destroyRecognitionSession(sessionId);
    }

    /**
     * Human-readable status line for one session (for diagnostics).
     */
    public String getSessionInfo(String sessionId) {
        SessionContext context = activeSessions.get(sessionId);
        if (context == null) {
            return "Session not found: " + sessionId;
        }

        VoiceSessionService.SessionState state = sessionManager.getSessionState(sessionId);
        return String.format("Session{id=%s, state=%s, active=%s, duration=%dms, lastResult=%s}",
            sessionId, state, context.isActive.get(), context.getDuration(),
            context.lastResult != null ? context.lastResult.getText() : "none");
    }

    /**
     * Log status of all active sessions plus the session manager summary.
     */
    public void logActiveSessionsInfo() {
        log.info("Active MRCP sessions: {}", activeSessions.size());
        activeSessions.forEach((sessionId, context) -> {
            log.info("  {}", getSessionInfo(sessionId));
        });

        log.info("Session manager info: {}", sessionManager.getServiceInfo());
    }

    /**
     * Tear down all sessions and stop the watchdog scheduler (bean shutdown).
     */
    @PreDestroy
    public void cleanup() {
        log.info("Cleaning up all voice recognition sessions");

        activeSessions.forEach((sessionId, context) -> {
            try {
                handleSessionTerminationInternal(sessionId);
            } catch (Exception e) {
                log.warn("Error cleaning up session: {}", sessionId, e);
            }
        });

        activeSessions.clear();
        sessionManager.cleanupAllSessions();
        try {
            watchdogScheduler.shutdownNow();
        } catch (Exception ignored) {
            // Scheduler shutdown is best-effort during container teardown.
        }
    }

    /**
     * Number of sessions that are active and not yet completed.
     */
    public int getActiveSessionCount() {
        return (int) activeSessions.values().stream()
            .filter(ctx -> ctx.isActive.get() && !ctx.isCompleted.get())
            .count();
    }

    /**
     * True when the session exists, is active, and has not completed.
     */
    public boolean isSessionActive(String sessionId) {
        SessionContext context = activeSessions.get(sessionId);
        return context != null && context.isActive.get() && !context.isCompleted.get();
    }
}
