package com.bnd.service.impl;

import cn.hutool.core.util.StrUtil;
import com.alibaba.dashscope.audio.tts.SpeechSynthesisResult;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisAudioFormat;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisParam;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesizer;
import com.alibaba.dashscope.audio.ttsv2.enrollment.Voice;
import com.alibaba.dashscope.audio.ttsv2.enrollment.VoiceEnrollmentService;
import com.alibaba.dashscope.common.ResultCallback;
import com.alibaba.dashscope.exception.InputRequiredException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.bnd.common.exception.SseEmitterExceptionHandler;
import com.bnd.common.holder.VoiceIdHolder;
import com.bnd.config.AiConfig;
import com.bnd.domain.session.VoiceSender;
import com.bnd.service.VoiceService;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;

import java.io.ByteArrayOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

/**
 * Voice (timbre) service implementation backed by the Alibaba DashScope
 * CosyVoice SDK. Supports cloning a custom voice from an uploaded audio
 * sample and streaming synthesized speech to the browser over SSE.
 */
@Slf4j
@Service
public class VoiceServiceImpl implements VoiceService {
    @Resource
    private AiConfig aiConfig;

    /**
     * Namespace prefix used when enrolling a cloned voice.
     */
    private static final String PREFIX = "custom";
    /**
     * Target model used for voice cloning.
     */
    private static final String TARGET_MODEL = "cosyvoice-v2";
    /**
     * Default demo text spoken when previewing a voice by id only.
     */
    private static final String NORMAL_PROMPT = "欢迎使用AI角色扮演平台，希望这里多样化的角色扮演可以给您带来不一样的体验，感谢您的使用，祝您工作顺利，生活愉快。";
    /**
     * PCM sample rate of the synthesized audio. Must match the
     * {@code SpeechSynthesisAudioFormat.PCM_22050HZ_MONO_16BIT} format requested in
     * {@link #playVoice(String, String)}; a mismatched rate in the debug WAV header
     * makes the saved file play at the wrong speed and pitch.
     */
    private static final int SAMPLE_RATE = 22050;

    /**
     * Creates (clones) a voice from an uploaded audio file.
     *
     * @param url audio file URL; uploading an audio file beforehand yields this URL
     * @return the new voice id, or an empty string when the SDK returned a blank id
     * @throws NoApiKeyException      when the DashScope API key is unavailable
     * @throws InputRequiredException when required SDK input is missing
     */
    @Override
    public String generateVoice(String url) throws NoApiKeyException, InputRequiredException {
        // 1. Clone the voice via the DashScope enrollment service.
        VoiceEnrollmentService service = new VoiceEnrollmentService(aiConfig.getDashscope().getApiKey());
        Voice voice = service.createVoice(TARGET_MODEL, PREFIX, url);
        String voiceId = voice.getVoiceId();
        log.info("[创建音色] 音色id: {}", voiceId);

        // 2. Register the id in the in-memory voice list only when it is usable.
        //    (Previously a blank id was registered before being discarded.)
        if (StrUtil.isBlank(voiceId)) {
            return "";
        }
        VoiceIdHolder.addVoiceId(voiceId);
        return voiceId;
    }

    /**
     * Plays a short demo sentence with the given voice.
     *
     * @param voiceId voice id to preview
     * @return SseEmitter streaming the synthesized audio
     */
    @Override
    public SseEmitter playVoiceByVoiceId(String voiceId){
        return playVoice(voiceId, NORMAL_PROMPT);
    }

    /**
     * Synthesizes the given text with the given voice and streams the audio
     * frames to the client over SSE.
     *
     * @param voiceId voice id; falls back to the configured default when unknown
     * @param text    text to synthesize; must be non-blank
     * @return SseEmitter used to push the audio stream to the front end
     */
    @Override
    public SseEmitter playVoice(String voiceId, String text) {
        // 1. Create the VoiceSender (wraps the emitter, send-state flag and lock).
        VoiceSender voiceSender = new VoiceSender();
        SseEmitter emitter = voiceSender.getEmitter();
        Object lock = voiceSender.getLock();

        // 2. Parameter validation.
        if (!VoiceIdHolder.isValidVoiceId(voiceId)) {
            voiceId = aiConfig.getDashscope().getSsVoice(); // fall back to the default voice
        }

        if (StrUtil.isBlank(text)) {
            SseEmitterExceptionHandler.sendError(emitter, "参数错误：请输入正确的文本！");
            return emitter;
        }

        // 3. Define the callback. It must reach the synthesizer created later,
        //    hence the single-element array acting as a mutable final reference.
        final SpeechSynthesizer[] synthesizerRef = new SpeechSynthesizer[1];
        // Debug aid: accumulates the raw PCM stream (see savePcmToFile).
        // NOTE(review): this buffer grows unboundedly per request and is never
        // flushed to disk here — confirm whether savePcmToFile should be wired in.
        final ByteArrayOutputStream pcmOutputStream = new ByteArrayOutputStream();

        ResultCallback<SpeechSynthesisResult> callback = new ResultCallback<SpeechSynthesisResult>() {
            @Override
            public void onEvent(SpeechSynthesisResult result) {
                try {
                    ByteBuffer audioFrame = result.getAudioFrame();
                    if (audioFrame == null || !audioFrame.hasRemaining()) {
                        log.info("[SSE推送音频数据] 无音频数据");
                        return;
                    }

                    // Copy the frame out of the SDK-owned buffer before sending.
                    byte[] frameBytes = new byte[audioFrame.remaining()];
                    audioFrame.get(frameBytes);

                    ByteBuffer safeAudioFrame = ByteBuffer.wrap(frameBytes); // re-wrap the private copy
                    int frameSize = frameBytes.length;

                    synchronized (lock) {
                        if (!voiceSender.canSend()) {
                            log.info("[SSE推送音频数据] 状态为不可发送，音频数据大小: {}", frameSize);
                            return;
                        }

                        try {
                            emitter.send(SseEmitter.event()
                                    .name("audio")
                                    .data(safeAudioFrame)
                                    .build());
                            pcmOutputStream.write(frameBytes);
                            log.info("[SSE推送音频数据] 成功发送音频帧，大小: {} 字节", frameSize);
                        } catch (IOException | RuntimeException e) {
                            // The client most likely disconnected; stop sending further frames.
                            log.debug("发送音频帧失败，可能客户端已断开", e);
                            voiceSender.getCanSend().set(false);
                        }
                    }
                } catch (Exception e) {
                    log.error("处理语音合成音频帧时发生异常", e);
                }
            }

            @Override
            public void onComplete() {
                log.info("语音合成完成回调触发");
                closeResources(synthesizerRef[0], voiceSender);

                try {
                    emitter.complete();
                } catch (Exception ex) {
                    // Ignore — the emitter may already be closed.
                }
            }

            @Override
            public void onError(Exception e) {
                log.error("语音合成出现异常", e);
                closeResources(synthesizerRef[0], voiceSender);
                SseEmitterExceptionHandler.sendError(emitter, "语音合成异常，请稍后再试！");
            }
        };

        // 4. Build the synthesis parameters and create the synthesizer.
        SpeechSynthesisParam param = SpeechSynthesisParam.builder()
                .apiKey(aiConfig.getDashscope().getApiKey())
                .model(aiConfig.getDashscope().getSsModel())
                .format(SpeechSynthesisAudioFormat.PCM_22050HZ_MONO_16BIT) // keep in sync with SAMPLE_RATE
                .voice(voiceId)
                .build();

        SpeechSynthesizer synthesizer = new SpeechSynthesizer(param, callback);
        synthesizerRef[0] = synthesizer; // make it reachable from the callback

        // 5. Register SSE lifecycle listeners.
        emitter.onCompletion(() -> {
            log.info("SSE 连接由客户端关闭");
            voiceSender.getCanSend().set(false);
            // Do NOT close the WebSocket here; the synthesizer callbacks own that.
        });

        emitter.onError(throwable -> {
            log.error("SSE 连接异常", throwable);
            voiceSender.getCanSend().set(false);
        });

        // 6. Start streaming synthesis. It is asynchronous — do not close
        //    resources in a finally block; onComplete/onError handle cleanup.
        try {
            synthesizer.streamingCall(text);
            synthesizer.streamingComplete(); // signal end of input so the server finishes synthesis
        } catch (Exception e) {
            log.error("启动语音合成失败", e);
            closeResources(synthesizer, voiceSender);
            SseEmitterExceptionHandler.sendError(emitter, "启动语音合成失败，请稍后再试！");
        }

        return emitter;
    }

    /**
     * Safely releases per-request resources: marks the sender unusable and
     * closes the SDK's underlying WebSocket connection.
     *
     * @param synthesizer synthesizer whose duplex connection should be closed (nullable)
     * @param voiceSender sender whose send flag should be cleared (nullable)
     */
    private void closeResources(SpeechSynthesizer synthesizer, VoiceSender voiceSender) {
        if (voiceSender == null) return;

        // 1. Mark the stream as no longer sendable.
        voiceSender.getCanSend().set(false);

        // 2. Close the WebSocket connection (Alibaba Cloud SDK).
        if (synthesizer != null && synthesizer.getDuplexApi() != null) {
            try {
                log.info("[关闭 WebSocket] 尝试关闭 WebSocket 连接");
                synthesizer.getDuplexApi().close(1000, "合成完成或异常终止");
            } catch (Exception ex) {
                log.warn("关闭 WebSocket 连接时发生异常", ex);
            }
        }
    }

    /**
     * Saves the accumulated PCM stream to the {@code debug-audio} folder, both
     * as raw PCM and as a playable WAV file. Debug helper — currently not
     * invoked by any code in this class.
     *
     * @param pcmStream the captured audio to save; no-op when null or empty
     */
    private void savePcmToFile(ByteArrayOutputStream pcmStream) {
        if (pcmStream == null || pcmStream.size() == 0) {
            log.warn("PCM 数据为空，跳过保存");
            return;
        }

        try {
            Path debugDir = Paths.get("debug-audio");
            Files.createDirectories(debugDir);

            String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd-HHmmss"));
            byte[] pcmData = pcmStream.toByteArray();
            int pcmSize = pcmData.length;

            // === 1. Save the raw PCM (optional, for deeper analysis) ===
            Path pcmPath = debugDir.resolve("voice_" + timestamp + ".pcm");
            try (FileOutputStream fos = new FileOutputStream(pcmPath.toFile())) {
                fos.write(pcmData);
            }
            log.info("✅ 原始 PCM 已保存: {}", pcmPath.toAbsolutePath());

            // === 2. Save as WAV (directly playable) ===
            // FIX: the header previously declared 16000 Hz while the synthesizer
            // produces 22050 Hz audio, so saved WAVs played slow and low-pitched.
            Path wavPath = debugDir.resolve("voice_" + timestamp + ".wav");
            try (FileOutputStream wavFos = new FileOutputStream(wavPath.toFile())) {
                writeWavHeader(wavFos, pcmSize, SAMPLE_RATE, (short) 1, (short) 16);
                wavFos.write(pcmData); // append the PCM payload
            }
            log.info("🎧 可播放 WAV 已生成: {}", wavPath.toAbsolutePath());
            log.info("💡 提示：双击该 WAV 文件即可播放，验证音频是否完整");

        } catch (Exception e) {
            log.error("❌ 保存调试音频文件失败", e);
        }
    }

    /**
     * Writes a standard 44-byte WAV (RIFF) header for uncompressed PCM data.
     *
     * @param out           output stream to write the header to
     * @param dataLen       PCM payload length in bytes
     * @param sampleRate    sample rate in Hz (e.g. 22050)
     * @param channels      channel count (1 = mono, 2 = stereo)
     * @param bitsPerSample bit depth (e.g. 16)
     * @throws IOException when writing to the stream fails
     */
    private void writeWavHeader(FileOutputStream out, int dataLen, int sampleRate, short channels, short bitsPerSample)
            throws IOException {
        short blockAlign = (short) (channels * bitsPerSample / 8);
        int byteRate = sampleRate * blockAlign;

        ByteBuffer header = ByteBuffer.allocate(44);
        header.order(ByteOrder.LITTLE_ENDIAN); // WAV headers are little-endian

        // RIFF header
        header.put("RIFF".getBytes());                    // 0-3
        header.putInt(36 + dataLen);                      // 4-7: total file length - 8
        header.put("WAVE".getBytes());                    // 8-11

        // fmt chunk
        header.put("fmt ".getBytes());                    // 12-15
        header.putInt(16);                                // 16-19: fmt chunk length
        header.putShort((short) 1);                       // 20-21: audio format (1 = PCM)
        header.putShort(channels);                        // 22-23
        header.putInt(sampleRate);                        // 24-27
        header.putInt(byteRate);                          // 28-31
        header.putShort(blockAlign);                      // 32-33
        header.putShort(bitsPerSample);                   // 34-35

        // data chunk
        header.put("data".getBytes());                    // 36-39
        header.putInt(dataLen);                           // 40-43

        out.write(header.array());
    }
}
