package com.voicecomm.recognition;

import com.voicecomm.audio.AudioProcessor;
import com.voicecomm.audio.AudioRecorder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Real-time voice processor.
 *
 * <p>Continuously consumes recorded audio frames, applies speech enhancement and a
 * simple energy-based VAD, accumulates voiced audio, and triggers speech recognition
 * either on silence boundaries (sentence segmentation mode) or on a fixed timer,
 * depending on configuration. Assumes 16 kHz / 16-bit / mono PCM input
 * (32 bytes per millisecond).
 *
 * <p>Thread-safety: start/stop are idempotent and race-free via atomic flags; the
 * shared accumulation buffer is guarded by {@code accumulateLock}.
 *
 * @author VoiceComm Team
 * @version 1.0.0
 */
public class RealTimeVoiceProcessor {
    
    private static final Logger logger = LoggerFactory.getLogger(RealTimeVoiceProcessor.class);
    
    // Collaborators for capture, enhancement and recognition.
    private final AudioRecorder audioRecorder;
    private final AudioProcessor audioProcessor;
    private final VoiceRecognitionService recognitionService;
    
    // Processing control flag and frame counter.
    private final AtomicBoolean isProcessing = new AtomicBoolean(false);
    private final AtomicLong processedFrames = new AtomicLong(0);
    
    // Raw audio frames queued from the recorder callback, drained by the processing thread.
    private final BlockingQueue<byte[]> audioBuffer = new LinkedBlockingQueue<>();
    // Enhanced speech accumulated until a recognition trigger drains it.
    private final ByteArrayOutputStream accumulatedAudio = new ByteArrayOutputStream();
    // Guards every access to accumulatedAudio: it is written by the processing thread
    // and drained/measured by whichever thread triggers recognition.
    private final Object accumulateLock = new Object();
    
    // Worker threads (created on start, interrupted and joined on stop).
    private Thread processingThread;
    private Thread recognitionThread;
    
    // Configuration snapshot (immutable for the lifetime of this processor).
    private final int frameSize; // NOTE(review): read from config but currently unused — confirm intent
    private final int maxBufferSize;
    private final double silenceThreshold;
    private final int minSilenceDuration;
    private final int recognitionInterval;
    private final double vadEnergyThreshold;
    private final int minSpeechDuration;
    private final boolean segmentOnSilence;
    // Segmentation tuning: minimum gap between two recognitions and maximum
    // duration of a single utterance, both in milliseconds.
    private final int minGapBetweenRecognitionsMillis;
    private final int maxSegmentDurationMillis;
    private volatile long lastRecognitionTime = 0L;
    // Re-entrancy guard: a new trigger is skipped while a recognition is in flight.
    private final AtomicBoolean recognitionInFlight = new AtomicBoolean(false);
    // Enhancement parameter: target RMS (0 disables normalization).
    private final double targetRms;
    
    // Result callback; volatile because it is written by the thread calling
    // startProcessing and read from worker/async completion threads.
    private volatile RecognitionCallback recognitionCallback;
    
    /**
     * Creates a processor wired to the given collaborators.
     *
     * @param audioRecorder      audio capture source
     * @param audioProcessor     speech enhancement / silence detection
     * @param recognitionService asynchronous speech-recognition backend
     * @param config             processing configuration (values are copied)
     */
    public RealTimeVoiceProcessor(AudioRecorder audioRecorder, 
                                AudioProcessor audioProcessor,
                                VoiceRecognitionService recognitionService,
                                ProcessingConfig config) {
        this.audioRecorder = audioRecorder;
        this.audioProcessor = audioProcessor;
        this.recognitionService = recognitionService;
        
        this.frameSize = config.getFrameSize();
        this.maxBufferSize = config.getMaxBufferSize();
        this.silenceThreshold = config.getSilenceThreshold();
        this.minSilenceDuration = config.getMinSilenceDuration();
        this.recognitionInterval = config.getRecognitionInterval();
        this.vadEnergyThreshold = config.getVadEnergyThreshold();
        this.minSpeechDuration = config.getMinSpeechDuration();
        this.segmentOnSilence = config.isSegmentOnSilence();
        this.minGapBetweenRecognitionsMillis = config.getMinGapBetweenRecognitionsMillis();
        this.maxSegmentDurationMillis = config.getMaxSegmentDurationMillis();
        this.targetRms = config.getTargetRms();
    }
    
    /**
     * Starts real-time voice processing. No-op (with a warning) if already running.
     * 
     * @param callback receives recognition results
     */
    public void startProcessing(RecognitionCallback callback) {
        // compareAndSet closes the check-then-act race of a get()/set() pair,
        // making concurrent start attempts safe.
        if (!isProcessing.compareAndSet(false, true)) {
            logger.warn("实时语音处理已在进行中");
            return;
        }
        
        this.recognitionCallback = callback;
        
        // Start audio capture feeding onAudioData.
        audioRecorder.startRecording(this::onAudioData);
        
        // Start the processing worker.
        startProcessingThread();
        // The timer-driven recognition thread is only needed when we are NOT
        // segmenting on silence boundaries; this keeps concurrency simpler.
        if (!segmentOnSilence) {
            startRecognitionThread();
        }
        
        logger.info("实时语音处理已启动");
    }
    
    /**
     * Stops real-time voice processing: stops capture, interrupts and briefly joins
     * the worker threads, and discards all buffered/accumulated audio so a later
     * restart does not recognize stale data from this session.
     */
    public void stopProcessing() {
        // Symmetric to startProcessing: atomic transition running -> stopped.
        if (!isProcessing.compareAndSet(true, false)) {
            logger.warn("实时语音处理未在进行中");
            return;
        }
        
        // Stop audio capture first so no new frames arrive.
        audioRecorder.stopRecording();
        
        // Interrupt workers and wait briefly for a clean exit.
        interruptAndJoin(processingThread);
        interruptAndJoin(recognitionThread);
        
        // Discard queued frames AND accumulated speech (previously the
        // accumulation buffer survived a stop and leaked into the next session).
        audioBuffer.clear();
        synchronized (accumulateLock) {
            accumulatedAudio.reset();
        }
        
        logger.info("实时语音处理已停止，共处理 {} 帧音频数据", processedFrames.get());
    }
    
    /**
     * Interrupts the given worker thread and waits up to one second for it to exit.
     * Safe to call with {@code null}; never joins the calling thread itself.
     */
    private void interruptAndJoin(Thread worker) {
        if (worker == null || worker == Thread.currentThread()) {
            return;
        }
        worker.interrupt();
        try {
            worker.join(1000);
        } catch (InterruptedException e) {
            // Preserve the caller's interrupt status.
            Thread.currentThread().interrupt();
        }
    }
    
    /**
     * Recorder callback: enqueues a captured frame, dropping it when the
     * bounded buffer is full (back-pressure by discard).
     * 
     * @param audioData raw PCM frame; null/empty frames are ignored
     */
    private void onAudioData(byte[] audioData) {
        if (audioData != null && audioData.length > 0) {
            try {
                if (audioBuffer.size() < maxBufferSize) {
                    audioBuffer.offer(audioData);
                } else {
                    logger.warn("音频缓冲区已满，丢弃音频数据");
                }
            } catch (Exception e) {
                logger.error("处理音频数据失败", e);
            }
        }
    }
    
    /**
     * Starts the audio-processing worker: drains the frame queue, runs VAD,
     * accumulates voiced audio and — in silence-segmentation mode — triggers
     * recognition at silence boundaries or when an utterance grows too long.
     */
    private void startProcessingThread() {
        processingThread = new Thread(() -> {
            logger.info("音频处理线程已启动");
            
            long speechAccumulatedBytes = 0;
            long voicedStreakMillis = 0;   // consecutive voiced duration
            long silenceStreakMillis = 0;  // consecutive silence duration
            while (isProcessing.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    byte[] audioData = audioBuffer.poll(100, java.util.concurrent.TimeUnit.MILLISECONDS);
                    if (audioData != null) {
                        boolean voiced = isVoiceActive(audioData, vadEnergyThreshold);
                        long now = System.currentTimeMillis();
                        int frameMillis = audioData.length / 32; // 16k/16bit/mono => 32 bytes/ms
                        if (!voiced) {
                            // Silence: extend the silence streak, reset the voiced streak.
                            silenceStreakMillis += frameMillis;
                            voicedStreakMillis = 0;
                            // Trigger a whole-sentence recognition at the silence boundary
                            // (requires minimum speech length + cooldown + sustained silence).
                            int currentAccumulated;
                            synchronized (accumulateLock) { currentAccumulated = accumulatedAudio.size(); }
                            if (segmentOnSilence && currentAccumulated > 0 &&
                                speechAccumulatedBytes >= bytesFromMillis(minSpeechDuration) &&
                                silenceStreakMillis >= minSilenceDuration && canRecognizeNow(now)) {
                                triggerRecognition();
                                lastRecognitionTime = now;
                                speechAccumulatedBytes = 0;
                                silenceStreakMillis = 0;
                            }
                            continue;
                        }

                        // Voiced: extend the voiced streak, reset the silence streak.
                        voicedStreakMillis += frameMillis;
                        silenceStreakMillis = 0;
                        processAudioFrame(audioData);
                        speechAccumulatedBytes += audioData.length;
                        processedFrames.incrementAndGet();

                        // Overlong-utterance protection: force a trigger once the
                        // segment exceeds the configured maximum duration.
                        if (segmentOnSilence && maxSegmentDurationMillis > 0 &&
                            speechAccumulatedBytes >= bytesFromMillis(maxSegmentDurationMillis) && canRecognizeNow(now)) {
                            triggerRecognition();
                            lastRecognitionTime = now;
                            speechAccumulatedBytes = 0;
                            voicedStreakMillis = 0;
                            silenceStreakMillis = 0;
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    logger.error("音频处理线程异常", e);
                }
            }
            
            logger.info("音频处理线程已结束");
        });
        
        processingThread.setName("AudioProcessor");
        processingThread.start();
    }
    
    /**
     * Starts the timer-driven recognition worker (non-segmentation mode only):
     * every {@code recognitionInterval} ms, recognizes the accumulated audio
     * once it reaches the minimum speech duration and the cooldown has elapsed.
     */
    private void startRecognitionThread() {
        recognitionThread = new Thread(() -> {
            logger.info("语音识别线程已启动");
            
            while (isProcessing.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(recognitionInterval);
                    if (!segmentOnSilence) {
                        // Read the buffer size under the lock — every other access to
                        // accumulatedAudio is synchronized, and ByteArrayOutputStream
                        // is not thread-safe.
                        int accumulated;
                        synchronized (accumulateLock) { accumulated = accumulatedAudio.size(); }
                        if (accumulated >= bytesFromMillis(minSpeechDuration) && canRecognizeNow(System.currentTimeMillis())) {
                            triggerRecognition();
                            lastRecognitionTime = System.currentTimeMillis();
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    logger.error("语音识别线程异常", e);
                }
            }
            
            logger.info("语音识别线程已结束");
        });
        
        recognitionThread.setName("VoiceRecognition");
        recognitionThread.start();
    }
    
    /**
     * Enhances one voiced frame and appends the speech portion to the
     * accumulation buffer, splitting at a detected silence segment.
     * 
     * @param audioData raw PCM frame (already classified as voiced)
     */
    private void processAudioFrame(byte[] audioData) {
        try {
            // Speech enhancement: denoise + RMS normalization.
            byte[] processedData = audioProcessor.enhanceSpeech(audioData, silenceThreshold, targetRms);
            
            // Detect silence segments inside the enhanced frame.
            AudioProcessor.SilenceSegment[] silenceSegments = audioProcessor.detectSilence(
                processedData, silenceThreshold, minSilenceDuration);
            
            if (silenceSegments.length > 0) {
                // Keep only the audio preceding the first silence segment.
                int silenceStart = silenceSegments[0].getStartPosition();
                if (silenceStart > 0) {
                    byte[] speechData = new byte[silenceStart];
                    System.arraycopy(processedData, 0, speechData, 0, silenceStart);
                    synchronized (accumulateLock) { accumulatedAudio.write(speechData); }
                }
                // Silence-segmentation mode: once silence is seen and enough speech
                // has accumulated, trigger immediately (deliberately bypasses the
                // recognition cooldown — NOTE(review): confirm this is intended).
                int size;
                synchronized (accumulateLock) { size = accumulatedAudio.size(); }
                if (segmentOnSilence && size >= bytesFromMillis(minSpeechDuration)) {
                    triggerRecognition();
                }
            } else {
                // No silence detected: append the whole enhanced frame.
                synchronized (accumulateLock) { accumulatedAudio.write(processedData); }
            }
            
        } catch (Exception e) {
            logger.error("处理音频帧失败", e);
        }
    }

    /**
     * Simple energy VAD: mean absolute 16-bit little-endian sample amplitude
     * compared against the threshold.
     */
    private boolean isVoiceActive(byte[] pcm16le, double energyThreshold) {
        if (pcm16le == null || pcm16le.length < 2) return false;
        long sum = 0;
        int samples = 0;
        for (int i = 0; i + 1 < pcm16le.length; i += 2) {
            short sample = (short) ((pcm16le[i + 1] << 8) | (pcm16le[i] & 0xFF));
            sum += Math.abs(sample);
            samples++;
        }
        if (samples == 0) return false;
        double avg = (double) sum / samples;
        return avg >= energyThreshold;
    }

    /** Converts a duration in milliseconds to a PCM byte count. */
    private int bytesFromMillis(int millis) {
        // 16kHz, 16bit, mono => 32000 bytes/sec => 32 bytes/ms
        return millis * 32;
    }

    /**
     * Drains the accumulated audio and submits it for asynchronous recognition.
     * At most one recognition runs at a time ({@code recognitionInFlight}).
     */
    private void triggerRecognition() {
        // Re-entrancy guard: skip if the previous recognition has not finished.
        if (!recognitionInFlight.compareAndSet(false, true)) {
            return;
        }
        try {
            byte[] audioData;
            synchronized (accumulateLock) {
                audioData = accumulatedAudio.toByteArray();
                accumulatedAudio.reset();
            }
            if (audioData.length == 0) {
                // Bug fix: the original returned here WITHOUT releasing the guard,
                // permanently blocking every subsequent recognition.
                recognitionInFlight.set(false);
                return;
            }
            recognitionService.recognizeAsync(audioData)
                .thenAccept(this::handleRecognitionResult)
                .whenComplete((r, ex) -> recognitionInFlight.set(false));
        } catch (Exception e) {
            logger.error("触发识别失败", e);
            recognitionInFlight.set(false);
        }
    }

    /** Returns true when the cooldown since the last recognition has elapsed. */
    private boolean canRecognizeNow(long nowMs) {
        return (nowMs - lastRecognitionTime) >= Math.max(0, minGapBetweenRecognitionsMillis);
    }
    
    /**
     * Dispatches a recognition result to the registered callback.
     * Successful, non-blank results are forwarded; failures are logged at debug.
     * 
     * @param result recognition result from the service
     */
    private void handleRecognitionResult(VoiceRecognitionService.RecognitionResult result) {
        if (result.isSuccess() && !result.getText().trim().isEmpty()) {
            RecognitionCallback callback = recognitionCallback; // single volatile read
            if (callback != null) {
                try {
                    callback.onRecognitionResult(result);
                } catch (Exception e) {
                    logger.error("处理识别结果回调失败", e);
                }
            }
        } else if (!result.isSuccess()) {
            logger.debug("语音识别失败: {}", result.getErrorMessage());
        }
    }
    
    /**
     * Returns whether processing is currently active.
     * 
     * @return true if processing, false otherwise
     */
    public boolean isProcessing() {
        return isProcessing.get();
    }
    
    /**
     * Returns the number of voiced frames processed so far.
     * 
     * @return processed frame count
     */
    public long getProcessedFrames() {
        return processedFrames.get();
    }
    
    /**
     * Releases resources: stops processing and closes the accumulation stream.
     */
    public void cleanup() {
        stopProcessing();
        
        try {
            accumulatedAudio.close(); // no-op for ByteArrayOutputStream, kept for symmetry
        } catch (Exception e) {
            logger.warn("关闭累积音频流失败", e);
        }
        
        logger.info("实时语音处理器资源已释放");
    }
    
    /**
     * Callback interface for recognition results.
     */
    @FunctionalInterface
    public interface RecognitionCallback {
        /**
         * Invoked when speech has been recognized.
         * 
         * @param result the recognition result
         */
        void onRecognitionResult(VoiceRecognitionService.RecognitionResult result);
    }
    
    /**
     * Processing configuration. Plain mutable bean; values are snapshotted by
     * the processor's constructor.
     */
    public static class ProcessingConfig {
        private int frameSize = 4096;
        private int maxBufferSize = 100;
        private double silenceThreshold = 1000.0;
        private int minSilenceDuration = 500; // milliseconds
        private int recognitionInterval = 2000; // milliseconds
        private double vadEnergyThreshold = 1200.0;
        private int minSpeechDuration = 400; // milliseconds
        private boolean segmentOnSilence = true;
        // Target RMS for enhancement (0 disables normalization).
        private double targetRms = 3000.0;
        // Segmentation tuning parameters.
        private int minGapBetweenRecognitionsMillis = 2000; // minimum gap between recognitions
        private int maxSegmentDurationMillis = 9000;        // maximum single-utterance duration
        
        public ProcessingConfig() {}
        
        public int getFrameSize() {
            return frameSize;
        }
        
        public void setFrameSize(int frameSize) {
            this.frameSize = frameSize;
        }
        
        public int getMaxBufferSize() {
            return maxBufferSize;
        }
        
        public void setMaxBufferSize(int maxBufferSize) {
            this.maxBufferSize = maxBufferSize;
        }
        
        public double getSilenceThreshold() {
            return silenceThreshold;
        }
        
        public void setSilenceThreshold(double silenceThreshold) {
            this.silenceThreshold = silenceThreshold;
        }
        
        public int getMinSilenceDuration() {
            return minSilenceDuration;
        }
        
        public void setMinSilenceDuration(int minSilenceDuration) {
            this.minSilenceDuration = minSilenceDuration;
        }
        
        public int getRecognitionInterval() {
            return recognitionInterval;
        }
        
        public void setRecognitionInterval(int recognitionInterval) {
            this.recognitionInterval = recognitionInterval;
        }

        public double getVadEnergyThreshold() { return vadEnergyThreshold; }
        public void setVadEnergyThreshold(double vadEnergyThreshold) { this.vadEnergyThreshold = vadEnergyThreshold; }
        public int getMinSpeechDuration() { return minSpeechDuration; }
        public void setMinSpeechDuration(int minSpeechDuration) { this.minSpeechDuration = minSpeechDuration; }
        public boolean isSegmentOnSilence() { return segmentOnSilence; }
        public void setSegmentOnSilence(boolean segmentOnSilence) { this.segmentOnSilence = segmentOnSilence; }

        public double getTargetRms() { return targetRms; }
        public void setTargetRms(double targetRms) { this.targetRms = targetRms; }

        public int getMinGapBetweenRecognitionsMillis() { return minGapBetweenRecognitionsMillis; }
        public void setMinGapBetweenRecognitionsMillis(int minGapBetweenRecognitionsMillis) { this.minGapBetweenRecognitionsMillis = minGapBetweenRecognitionsMillis; }
        public int getMaxSegmentDurationMillis() { return maxSegmentDurationMillis; }
        public void setMaxSegmentDurationMillis(int maxSegmentDurationMillis) { this.maxSegmentDurationMillis = maxSegmentDurationMillis; }
    }
}
