package com.voiceqsologger.service;

import cn.smartjavaai.common.entity.Language;
import cn.smartjavaai.common.entity.R;
import cn.smartjavaai.speech.asr.config.AsrModelConfig;
import cn.smartjavaai.speech.asr.entity.AsrResult;
import cn.smartjavaai.speech.asr.entity.WhisperParams;
import cn.smartjavaai.speech.asr.enums.AsrModelEnum;
import cn.smartjavaai.speech.asr.factory.SpeechRecognizerFactory;
import cn.smartjavaai.speech.asr.model.SpeechRecognizer;
import com.voiceqsologger.config.AsrModeProperties;
import com.voiceqsologger.config.AsrSegmentationProperties;
import com.voiceqsologger.config.AsrWhisperProperties;
import io.github.givimad.whisperjni.WhisperFullParams;
import io.github.givimad.whisperjni.WhisperSamplingStrategy;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.stereotype.Service;

import java.io.File;
import java.io.FileOutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Comparator;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;
import java.util.stream.Stream;

/**
 * 流式识别服务：简单分段缓冲写入临时 wav 文件后调用离线识别
 * 说明：Whisper-JNI 无原生流式接口，这里采用短时分段(例如每 3-5 秒)聚合后调用一次。
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class StreamingAsrService {

    private final AsrWhisperProperties properties;
    private final AsrModeProperties modeProperties;
    private final AsrSegmentationProperties segProperties;
    private final ApiAsrClient apiAsrClient;
    private final ExecutorService executor = Executors.newSingleThreadExecutor();
    private volatile SpeechRecognizer cachedRecognizer;

    static class SessionState {
        String lang;
        int sampleRate;
        Path tempDir;
        FileOutputStream fos;
        long lastFlushTs;
        long lastVoiceTs;
        boolean hasSpeech;
        boolean recognizing;
        String mode; // local | api
    }

    private final Map<String, SessionState> sessions = new ConcurrentHashMap<>();

    public void start(String sessionId, int sampleRate, String lang, String mode) throws Exception {
        stop(sessionId);
        SessionState s = new SessionState();
        s.lang = lang;
        s.sampleRate = sampleRate;
        s.tempDir = Files.createTempDirectory("asr_" + sessionId + "_");
        File raw = s.tempDir.resolve("buffer.pcm").toFile();
        s.fos = new FileOutputStream(raw, true);
        long now = System.currentTimeMillis();
        s.lastFlushTs = now;
        s.lastVoiceTs = now;
        s.hasSpeech = false;
        s.recognizing = false;
        String globalMode = modeProperties.getMode();
        s.mode = (mode == null || mode.isEmpty()) ? (globalMode == null ? "local" : globalMode.toLowerCase()) : mode.toLowerCase();
        sessions.put(sessionId, s);
    }

    public void append(String sessionId, byte[] pcmChunk,
                       Consumer<String> onPartial,
                       Consumer<String> onFinal,
                       Runnable onAsrStart) throws Exception {
        SessionState s = sessions.get(sessionId);
        if (s == null) return;
        synchronized (s) {
            // 识别线程可能刚关闭了写入流，这里若为 null 则立刻重建
            if (s.fos == null) {
                Path pcmPath = s.tempDir.resolve("buffer.pcm");
                s.fos = new FileOutputStream(pcmPath.toFile(), true);
            }
            s.fos.write(pcmChunk);
            long now = System.currentTimeMillis();
            boolean voice = isVoiceChunk(pcmChunk);
            if (voice) {
                s.lastVoiceTs = now;
                s.hasSpeech = true;
            }
            // 当检测到静音持续超过阈值且之前存在语音段，触发一次识别
            long silenceMs = now - s.lastVoiceTs;
            final long MIN_SILENCE_MS = segProperties.getMinSilenceMs();
            if (!voice && s.hasSpeech && silenceMs >= MIN_SILENCE_MS && !s.recognizing) {
                s.recognizing = true;
                if (onAsrStart != null) {
                    try { onAsrStart.run(); } catch (Exception ignore) {}
                }
                executor.submit(() -> {
                    String finalText;
                    try {
                        finalText = recognizeOnce(s, true);
                    } catch (Exception e) {
                        finalText = null;
                    }
                    synchronized (s) {
                        s.hasSpeech = false; // 本段已消耗
                        s.recognizing = false;
                    }
                    if (finalText != null && !finalText.isEmpty()) {
                        onFinal.accept(finalText);
                    }
                });
            }
        }
    }

    public void stop(String sessionId) {
        SessionState s = sessions.remove(sessionId);
        if (s == null) return;
        try {
            synchronized (s) {
                if (s.fos != null) {
                    s.fos.flush();
                    s.fos.close();
                }
                // 对剩余缓冲做最后一次识别，停止阶段无需重建写入流
                if (s.hasSpeech && !s.recognizing) {
                    s.recognizing = true;
                    try {
                        recognizeOnce(s, false);
                    } finally {
                        s.recognizing = false;
                        s.hasSpeech = false;
                    }
                }
            }
        } catch (Exception e) {
            log.warn("stop recognize error", e);
        }
        try {
            Files.walk(s.tempDir)
                    .map(Path::toFile)
                    .sorted((a, b) -> -a.compareTo(b))
                    .forEach(File::delete);
        } catch (Exception ignore) {}
    }

    private String recognizeOnce(SessionState s, boolean reopenAfter) throws Exception {
        // 将当前 PCM 缓冲转为 WAV（16kHz mono 16bit）并直接以内存字节调用识别
        Path pcm = s.tempDir.resolve("buffer.pcm");
        if (!Files.exists(pcm) || Files.size(pcm) == 0) return null;

        // 读取并清空 PCM：在 Windows 需要先关闭流，处理后再根据需要重建
        if (s.fos != null) {
            s.fos.flush();
            s.fos.close();
            s.fos = null;
        }
        byte[] pcmData = Files.readAllBytes(pcm);
        // 空段或纯静音不触发识别
        if (pcmData.length < 4 || !containsVoice(pcmData)) {
            try (FileOutputStream truncate = new FileOutputStream(pcm.toFile(), false)) { }
            if (reopenAfter) {
                s.fos = new FileOutputStream(pcm.toFile(), true);
            }
            return null;
        }
        byte[] wavBytes = buildWavBytes(s.sampleRate, pcmData);
        // 截断原始 PCM 缓冲文件
        try (FileOutputStream truncate = new FileOutputStream(pcm.toFile(), false)) {
            // 打开即清空
        }
        if (reopenAfter) {
            s.fos = new FileOutputStream(pcm.toFile(), true);
        }

        // 根据模式调用：api 或 local
        if ("api".equalsIgnoreCase(s.mode)) {
            long startTime = System.currentTimeMillis();
            String text = apiAsrClient.transcribe(wavBytes, "segment.wav");
            if (text != null && !text.isEmpty()) {
                log.info("api success cost:{}, text:{}", System.currentTimeMillis() - startTime, text);
                return text;
            }
            log.info("api segment empty");
            return null;
        }

        SpeechRecognizer recognizer = getOrCreateRecognizer();
        WhisperParams params = new WhisperParams();
        //语言：中文
        params.setLanguage(Language.ZH);
        /**
         * 解码搜索策略类型：
         * GREEDY - 贪婪解码，逐步选择概率最高的结果；
         * BEAN_SEARCH - Beam 搜索，保留多个候选路径以提高准确性。
         */
        WhisperFullParams fullParams = new WhisperFullParams(WhisperSamplingStrategy.BEAN_SEARCH);
        //语言
        fullParams.language = Language.ZH.getCode();
        //线程数，设为 0 表示使用最大核心数。
        fullParams.nThreads = 0;
        //解码器使用的历史文本作为提示的最大 token 数。
        fullParams.nMaxTextCtx = 16384;
        //解码起始偏移（毫秒）
        fullParams.offsetMs = 0;
        //解码持续时长（毫秒），超过此长度的音频将被截断
        fullParams.durationMs = 0;
        //是否翻译为英文
        fullParams.translate = false;
        // 初始提示：引导输出简体中文并偏向通联关键词与格式
        fullParams.initialPrompt = (
                "你是业余无线电通联转写助手，仅输出简体中文，不输出解释。" +
                "尽量保留呼号等英文大写与数字，不添加多余标点。" +
                "常见关键词：QTH(位置)、RST/信号报告(59/579/599)、RIG(设备型号)、ANT/天线、功率。" +
                "示例短语：'这里BD1BWV，信号报告59，QTH北京，RIG IC-7300，ANT 八木天线。' " +
                "'呼号BD1ABC，RST 59，位置上海，设备 FT-891，天线 棒杆天线。' "
        );
        //禁用上下文链接，不使用前一段解码结果作为上下文
        fullParams.noContext = true;
        //是否强制仅输出一个段落（适用于短语音）
        fullParams.singleSegment = false;
        //是否打印特殊标记
        fullParams.printSpecial = false;
        //是否直接从 whisper.cpp 中打印结果（不推荐，建议使用回调方式替代）
        fullParams.printRealtime = false;
        //抑制非语音 token输出
        fullParams.suppressNonSpeechTokens = false;
        //更多参数请查看官网：https://github.com/GiviMAD/whisper-jni/blob/33854520b1f0b3697106a7932a2fd64e8191bca9/src/main/java/io/github/givimad/whisperjni/WhisperFullParams.java
        params.setParams(fullParams);
        log.info("start recognize....................wavBytes:{}",wavBytes.length);
        long startTime = System.currentTimeMillis();
        R<AsrResult> result = recognizer.recognize(wavBytes, params);
        if (result.isSuccess()) {
            log.info("success cost:{},text:{}",System.currentTimeMillis()-startTime,result.getData().getText());
            return result.getData().getText();
        } else {
            log.info("segment failed: {}", result.getMessage());
            return null;
        }
    }

    private byte[] buildWavBytes(int sampleRate, byte[] pcmData) {
        int byteRate = sampleRate * 2;
        int totalDataLen = pcmData.length + 36;
        byte[] header = new byte[44];
        header[0] = 'R'; header[1] = 'I'; header[2] = 'F'; header[3] = 'F';
        header[4] = (byte) (totalDataLen & 0xff);
        header[5] = (byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (byte) ((totalDataLen >> 24) & 0xff);
        header[8] = 'W'; header[9] = 'A'; header[10] = 'V'; header[11] = 'E';
        header[12] = 'f'; header[13] = 'm'; header[14] = 't'; header[15] = ' ';
        header[16] = 16; header[17] = 0; header[18] = 0; header[19] = 0;
        header[20] = 1; header[21] = 0; // PCM
        header[22] = 1; header[23] = 0; // Mono
        header[24] = (byte) (sampleRate & 0xff);
        header[25] = (byte) ((sampleRate >> 8) & 0xff);
        header[26] = (byte) ((sampleRate >> 16) & 0xff);
        header[27] = (byte) ((sampleRate >> 24) & 0xff);
        header[28] = (byte) (byteRate & 0xff);
        header[29] = (byte) ((byteRate >> 8) & 0xff);
        header[30] = (byte) ((byteRate >> 16) & 0xff);
        header[31] = (byte) ((byteRate >> 24) & 0xff);
        header[32] = 2; header[33] = 0;
        header[34] = 16; header[35] = 0;
        header[36] = 'd'; header[37] = 'a'; header[38] = 't'; header[39] = 'a';
        header[40] = (byte) (pcmData.length & 0xff);
        header[41] = (byte) ((pcmData.length >> 8) & 0xff);
        header[42] = (byte) ((pcmData.length >> 16) & 0xff);
        header[43] = (byte) ((pcmData.length >> 24) & 0xff);

        byte[] out = new byte[header.length + pcmData.length];
        System.arraycopy(header, 0, out, 0, header.length);
        System.arraycopy(pcmData, 0, out, header.length, pcmData.length);
        return out;
    }

    private boolean containsVoice(byte[] pcm16le) {
        return isVoiceChunk(pcm16le);
    }

    /**
     * 能量阈值判断：简单 RMS 检测，避免太频繁触发识别
     */
    private boolean isVoiceChunk(byte[] pcm16le) {
        if (pcm16le == null || pcm16le.length < 2) return false;
        long sum = 0;
        int samples = 0;
        for (int i = 0; i + 1 < pcm16le.length; i += 2) {
            int lo = pcm16le[i] & 0xff;
            int hi = pcm16le[i + 1];
            int v = (hi << 8) | lo;
            sum += (long) v * v;
            samples++;
        }
        if (samples == 0) return false;
        double rms = Math.sqrt(sum / (double) samples);
        // 阈值：经验值，针对 16bit PCM；可按需调节
        return rms > 500.0;
    }

    private SpeechRecognizer getOrCreateRecognizer() {
        if (cachedRecognizer != null) return cachedRecognizer;
        synchronized (this) {
            if (cachedRecognizer == null) {
                AsrModelConfig config = new AsrModelConfig();
                config.setModelEnum(AsrModelEnum.WHISPER);
                config.setModelPath(properties.getModelPath());
                cachedRecognizer = SpeechRecognizerFactory.getInstance().getModel(config);
            }
        }
        return cachedRecognizer;
    }
}


