package com.och.mrcp.service.impl;

import com.alibaba.nls.client.protocol.InputFormatEnum;
import com.alibaba.nls.client.protocol.NlsClient;
import com.alibaba.nls.client.protocol.SampleRateEnum;
import com.alibaba.nls.client.protocol.asr.SpeechTranscriber;
import com.alibaba.nls.client.protocol.asr.SpeechTranscriberListener;
import com.alibaba.nls.client.protocol.asr.SpeechTranscriberResponse;
import com.och.mrcp.config.EngineConfig;
import com.och.mrcp.model.RecognitionResult;
import com.och.mrcp.service.VoiceSessionService;
import lombok.extern.slf4j.Slf4j;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Aliyun speech-recognition (ASR) session.
 * Manages the complete lifecycle of a single streaming-transcription session:
 * start → send audio → stop → cleanup.
 *
 * <p>Thread-safety: {@code start()}, {@code stop()} and {@code cleanup()}
 * serialize state transitions via {@code stateLock}. Fields that are read
 * outside the lock — by {@code sendAudioData()} tasks and by the SDK listener
 * callbacks, which run on other threads — are {@code volatile} or atomic.
 */
@Slf4j
public class AliyunAsrSession {

    private final String sessionId;
    private final String appKey;
    private final NlsClient nlsClient;
    private final EngineConfig.RecognitionConfig config;
    private final VoiceSessionService.SessionCallback callback;

    // volatile: assigned under stateLock in start(), but read lock-free by
    // sendAudioData() tasks and cleanup().
    private volatile SpeechTranscriber transcriber;
    // volatile: checked without the lock in sendAudioData() and mutated from
    // listener callback threads via setState(); plain field had no
    // happens-before edge and could be observed stale.
    private volatile VoiceSessionService.SessionState state = VoiceSessionService.SessionState.CREATED;
    private final AtomicBoolean isTranscriberStarted = new AtomicBoolean(false);
    private final ReentrantLock stateLock = new ReentrantLock();

    private final long createTime;
    // volatile: written by async start/stop tasks, read by getDuration() from
    // any thread (listener callbacks, toString(), monitoring).
    private volatile long startTime;
    private volatile long endTime;

    // Statistics — incremented from async send tasks, so plain int/long
    // read-modify-writes (x++, x += n) would race; atomics fix that.
    private final AtomicInteger audioPacketsReceived = new AtomicInteger();
    private final AtomicLong totalAudioBytes = new AtomicLong();
    // volatile: written from the listener thread, read in onTranscriptionComplete.
    private volatile String lastRecognitionResult = "";

    /**
     * Creates a new, not-yet-started ASR session.
     *
     * @param sessionId unique identifier of this session (used in logs/callbacks)
     * @param appKey    Aliyun NLS application key
     * @param nlsClient shared NLS client used to open the transcriber connection
     * @param config    recognition options (punctuation, ITN, language, ...)
     * @param callback  receives results, state changes and errors; may be null
     */
    public AliyunAsrSession(String sessionId,
                           String appKey,
                           NlsClient nlsClient,
                           EngineConfig.RecognitionConfig config,
                           VoiceSessionService.SessionCallback callback) {
        this.sessionId = sessionId;
        this.appKey = appKey;
        this.nlsClient = nlsClient;
        this.config = config;
        this.callback = callback;
        this.createTime = System.currentTimeMillis();

        log.info("Created ASR session: {}", sessionId);
    }

    /**
     * Starts the recognition session asynchronously.
     *
     * <p>Transitions CREATED → STARTING → ACTIVE. On any failure the session
     * moves to ERROR and the returned future completes exceptionally.
     *
     * @return future that completes when the transcriber has been started
     * @throws IllegalStateException (via the future) if the session is not in CREATED state
     */
    public CompletableFuture<Void> start() {
        return CompletableFuture.runAsync(() -> {
            stateLock.lock();
            try {
                if (state != VoiceSessionService.SessionState.CREATED) {
                    throw new IllegalStateException("Session " + sessionId + " is not in CREATED state: " + state);
                }

                setState(VoiceSessionService.SessionState.STARTING);

                log.info("Starting ASR session: {}", sessionId);

                // Create the streaming transcriber bound to our listener.
                transcriber = new SpeechTranscriber(nlsClient, createTranscriberListener());

                // Configure recognition parameters. The MRCP media path delivers
                // 8 kHz PCM, hence the fixed format/sample-rate.
                transcriber.setAppKey(appKey);
                transcriber.setFormat(InputFormatEnum.PCM);
                transcriber.setSampleRate(SampleRateEnum.SAMPLE_RATE_8K);
                transcriber.setEnableIntermediateResult(true); // emit partial results
                transcriber.setEnablePunctuation(config.isEnablePunctuation());
                transcriber.setEnableITN(config.isEnableInverseText());

                // Optional advanced parameters (left disabled):
                // transcriber.addCustomedParam("max_sentence_silence", 800);
                // transcriber.addCustomedParam("enable_semantic_sentence_detection", false);

                // Start recognition. Failures fall through to the outer catch,
                // which records ERROR state and logs once.
                transcriber.start();
                isTranscriberStarted.set(true);
                startTime = System.currentTimeMillis();

                setState(VoiceSessionService.SessionState.ACTIVE);

                log.info("ASR session started successfully: {}", sessionId);

            } catch (Exception e) {
                setState(VoiceSessionService.SessionState.ERROR);
                log.error("Failed to start ASR session: {}", sessionId, e);
                throw new RuntimeException("Failed to start ASR session: " + sessionId, e);
            } finally {
                stateLock.unlock();
            }
        });
    }

    /**
     * Sends one packet of PCM audio to the transcriber asynchronously.
     *
     * <p>Invalid input and wrong session/transcriber state are logged and
     * ignored (best-effort streaming); a send failure moves the session to
     * ERROR and notifies the callback.
     *
     * @param audioData raw PCM bytes; null or empty packets are dropped
     * @return future that completes when the packet has been handled
     */
    public CompletableFuture<Void> sendAudioData(byte[] audioData) {
        return CompletableFuture.runAsync(() -> {
            // 1. Validate the audio payload first.
            if (audioData == null) {
                log.warn("Cannot send null audio data to session: {}", sessionId);
                return;
            }

            if (audioData.length == 0) {
                log.debug("Received empty audio data for session: {}, ignoring", sessionId);
                return;
            }

            // 2. Session must be actively transcribing.
            if (state != VoiceSessionService.SessionState.ACTIVE) {
                log.warn("Cannot send audio data to session {} in state: {}", sessionId, state);
                return;
            }

            // 3. Transcriber must exist and have been started.
            if (transcriber == null || !isTranscriberStarted.get()) {
                log.warn("Transcriber not ready for session: {}", sessionId);
                return;
            }

            try {
                // 4. Forward the packet to Aliyun ASR and update counters.
                transcriber.send(audioData, audioData.length);
                int packets = audioPacketsReceived.incrementAndGet();
                long bytes = totalAudioBytes.addAndGet(audioData.length);

                log.debug("Sent {} bytes to ASR session: {}, total packets: {}, total bytes: {}",
                    audioData.length, sessionId, packets, bytes);

            } catch (Exception e) {
                // audioData was already null-checked above, so log its length directly.
                log.error("Failed to send audio data to session: {}, audioData size: {}",
                    sessionId, audioData.length, e);
                setState(VoiceSessionService.SessionState.ERROR);
                if (callback != null) {
                    callback.onError(sessionId, e);
                }
            }
        });
    }

    /**
     * Stops the recognition session asynchronously.
     *
     * <p>Transitions ACTIVE → STOPPING → STOPPED; a no-op (with a debug log)
     * if the session is not ACTIVE. Failures move the session to ERROR and
     * notify the callback instead of propagating.
     *
     * @return future that completes when the transcriber has been stopped
     */
    public CompletableFuture<Void> stop() {
        return CompletableFuture.runAsync(() -> {
            stateLock.lock();
            try {
                if (state != VoiceSessionService.SessionState.ACTIVE) {
                    log.debug("Session {} is not active, current state: {}", sessionId, state);
                    return;
                }

                setState(VoiceSessionService.SessionState.STOPPING);

                log.info("Stopping ASR session: {}", sessionId);

                if (transcriber != null && isTranscriberStarted.get()) {
                    transcriber.stop();
                    endTime = System.currentTimeMillis();

                    log.info("ASR session stopped: {}, duration: {}ms, packets: {}, bytes: {}",
                        sessionId, getDuration(), audioPacketsReceived.get(), totalAudioBytes.get());
                }

                setState(VoiceSessionService.SessionState.STOPPED);

            } catch (Exception e) {
                setState(VoiceSessionService.SessionState.ERROR);
                log.error("Failed to stop ASR session: {}", sessionId, e);
                if (callback != null) {
                    callback.onError(sessionId, e);
                }
            } finally {
                stateLock.unlock();
            }
        });
    }

    /**
     * Releases transcriber resources and forces the session into STOPPED.
     *
     * <p>Safe to call in any state; stops the transcriber first if still
     * ACTIVE. Close errors are logged and swallowed so cleanup always
     * completes.
     */
    public void cleanup() {
        stateLock.lock();
        try {
            log.info("Cleaning up ASR session: {}", sessionId);

            if (transcriber != null) {
                try {
                    if (state == VoiceSessionService.SessionState.ACTIVE) {
                        transcriber.stop();
                    }
                    transcriber.close();
                } catch (Exception e) {
                    // Best-effort release: never let a close failure abort cleanup.
                    log.warn("Error closing transcriber for session: {}", sessionId, e);
                }
            }

            isTranscriberStarted.set(false);

            if (state != VoiceSessionService.SessionState.STOPPED) {
                setState(VoiceSessionService.SessionState.STOPPED);
            }

            log.info("ASR session cleaned up: {}, final stats - packets: {}, bytes: {}, duration: {}ms",
                sessionId, audioPacketsReceived.get(), totalAudioBytes.get(), getDuration());

        } finally {
            stateLock.unlock();
        }
    }

    /**
     * Builds the SDK listener that translates Aliyun transcription events
     * into {@link VoiceSessionService.SessionCallback} notifications.
     * Listener methods are invoked on SDK-owned threads.
     */
    private SpeechTranscriberListener createTranscriberListener() {
        return new SpeechTranscriberListener() {
            @Override
            public void onTranscriptionResultChange(SpeechTranscriberResponse response) {
                // Intermediate (partial) result for the sentence in progress.
                if (callback != null) {
                    RecognitionResult result = createRecognitionResult(response, false);
                    callback.onIntermediateResult(sessionId, result);
                }

                log.debug("Intermediate result for session {}: {}", sessionId, response.getTransSentenceText());
            }

            @Override
            public void onTranscriberStart(SpeechTranscriberResponse response) {
                log.debug("Transcriber started for session {}: task_id={}", sessionId, response.getTaskId());
            }

            @Override
            public void onSentenceBegin(SpeechTranscriberResponse response) {
                log.debug("Sentence begin for session {}: index={}", sessionId, response.getTransSentenceIndex());
            }

            @Override
            public void onSentenceEnd(SpeechTranscriberResponse response) {
                // End of a sentence — treated as a complete (final) result.
                if (callback != null) {
                    RecognitionResult result = createRecognitionResult(response, true);
                    lastRecognitionResult = result.getText();
                    callback.onFinalResult(sessionId, result);
                }

                log.debug("Sentence end for session {}: {}", sessionId, response.getTransSentenceText());
            }

            @Override
            public void onTranscriptionComplete(SpeechTranscriberResponse response) {
                log.debug("Transcription completed for session {}: task_id={}", sessionId, response.getTaskId());

                // If nothing was ever recognized, deliver an explicit empty
                // final result so the caller still gets a terminal event.
                if (lastRecognitionResult.isEmpty() && callback != null) {
                    RecognitionResult emptyResult = new RecognitionResult();
                    emptyResult.setText("");
                    emptyResult.setConfidence(0.0);
                    emptyResult.setDuration(getDuration());
                    emptyResult.setLanguage(config.getLanguage());
                    emptyResult.setFinal(true);
                    callback.onFinalResult(sessionId, emptyResult);
                }
            }

            @Override
            public void onFail(SpeechTranscriberResponse response) {
                log.error("Transcription failed for session {}: status={}, message={}",
                    sessionId, response.getStatus(), response.getStatusText());

                setState(VoiceSessionService.SessionState.ERROR);

                if (callback != null) {
                    Exception error = new RuntimeException("ASR failed: " + response.getStatusText());
                    callback.onError(sessionId, error);
                }
            }
        };
    }

    /**
     * Maps an SDK response into the project's {@link RecognitionResult}.
     *
     * @param response SDK transcription event payload
     * @param isFinal  true when this is a completed sentence, false for a partial
     * @return populated recognition result
     */
    private RecognitionResult createRecognitionResult(SpeechTranscriberResponse response, boolean isFinal) {
        RecognitionResult result = new RecognitionResult();
        result.setText(response.getTransSentenceText());
        result.setConfidence(response.getConfidence());
        result.setDuration(response.getTransSentenceTime());
        result.setLanguage(config.getLanguage());
        result.setTaskId(response.getTaskId());
        result.setBeginTime(response.getSentenceBeginTime());
        result.setFinal(isFinal);
        return result;
    }

    /**
     * Atomically transitions the session state and notifies the callback.
     *
     * <p>Takes {@code stateLock} (reentrant, so callers already holding it are
     * fine) because listener threads invoke this outside any lock; without it
     * the read-modify-write of {@code state} could race. The callback is
     * deliberately invoked outside the critical section... except when the
     * caller itself holds the lock, which matches the original behavior.
     */
    private void setState(VoiceSessionService.SessionState newState) {
        VoiceSessionService.SessionState oldState;
        stateLock.lock();
        try {
            oldState = this.state;
            this.state = newState;
        } finally {
            stateLock.unlock();
        }

        if (callback != null && oldState != newState) {
            try {
                callback.onStateChanged(sessionId, oldState, newState);
            } catch (Exception e) {
                // A misbehaving callback must not break the state machine.
                log.warn("Error notifying state change for session {}: {} -> {}", sessionId, oldState, newState, e);
            }
        }
    }

    // Getters — signatures unchanged; atomic counters are unwrapped here.
    public String getSessionId() { return sessionId; }
    public VoiceSessionService.SessionState getState() { return state; }
    public long getCreateTime() { return createTime; }
    public long getStartTime() { return startTime; }
    public long getEndTime() { return endTime; }

    /**
     * Elapsed session time in milliseconds: 0 if never started, wall-clock so
     * far if still running, start-to-stop otherwise.
     */
    public long getDuration() {
        if (startTime == 0) {
            return 0;
        }
        return (endTime > 0 ? endTime : System.currentTimeMillis()) - startTime;
    }

    public int getAudioPacketsReceived() { return audioPacketsReceived.get(); }
    public long getTotalAudioBytes() { return totalAudioBytes.get(); }
    public String getLastRecognitionResult() { return lastRecognitionResult; }

    @Override
    public String toString() {
        return String.format("AliyunAsrSession{id=%s, state=%s, duration=%dms, packets=%d, bytes=%d}",
            sessionId, state, getDuration(), audioPacketsReceived.get(), totalAudioBytes.get());
    }
}
