package com.qiniu.aiplayroles.service;

import com.alibaba.dashscope.audio.tts.SpeechSynthesisResult;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisParam;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesizer;
import com.alibaba.dashscope.common.ResultCallback;
import com.qiniu.aiplayroles.config.VoiceConfig;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

import java.io.ByteArrayOutputStream;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

@Service
@RequiredArgsConstructor
@Slf4j
public class VoiceService {

    private final VoiceConfig voiceConfig;

    // Timestamp format for progress logs. DateTimeFormatter is immutable and
    // thread-safe, so a single shared instance is safe across callback threads.
    private static final DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");

    /**
     * Asynchronously synthesizes speech for the given text via the DashScope TTS
     * service and emits the complete audio payload once synthesis finishes.
     *
     * @param text  text to synthesize; must be non-null and non-blank
     * @param voice voice id (optional; falls back to the configured default when
     *              null or blank)
     * @return a {@code Mono} emitting the concatenated audio bytes, or an error
     *         signal ({@link IllegalArgumentException} for blank text,
     *         {@link RuntimeException} when no audio was received, or the SDK's
     *         own exception on synthesis failure)
     */
    public Mono<byte[]> synthesizeSpeech(String text, String voice) {
        if (text == null || text.trim().isEmpty()) {
            return Mono.error(new IllegalArgumentException("文本不能为空"));
        }

        // Fall back to the default voice for blank input as well as null
        // (previously only null triggered the fallback).
        String finalVoice = (voice == null || voice.trim().isEmpty())
                ? voiceConfig.getVoice().getDefaultVoice()
                : voice;

        return Mono.create(sink -> {
            CountDownLatch latch = new CountDownLatch(1);
            // Accumulate frames in a ByteArrayOutputStream: O(n) amortized growth
            // instead of the previous copy-and-merge arrays (O(n^2) total copying).
            // Its write() is synchronized, so callback-thread writes are safe.
            ByteArrayOutputStream audioBuffer = new ByteArrayOutputStream();
            AtomicReference<Exception> error = new AtomicReference<>();

            // SDK callback: collects audio frames and signals completion/failure.
            ResultCallback<SpeechSynthesisResult> callback = new ResultCallback<SpeechSynthesisResult>() {
                @Override
                public void onEvent(SpeechSynthesisResult result) {
                    // getAudioFrame() returns a ByteBuffer; copy its contents out
                    // before the SDK reuses it.
                    if (result.getAudioFrame() != null) {
                        byte[] frameData = new byte[result.getAudioFrame().remaining()];
                        result.getAudioFrame().get(frameData);
                        audioBuffer.write(frameData, 0, frameData.length);

                        log.info("{} 收到音频数据，大小: {} bytes",
                                LocalDateTime.now().format(formatter),
                                frameData.length);
                    }
                }

                @Override
                public void onComplete() {
                    log.info("{} 语音合成完成，总音频数据大小: {} bytes",
                            LocalDateTime.now().format(formatter),
                            audioBuffer.size());
                    latch.countDown();
                }

                @Override
                public void onError(Exception e) {
                    log.error("语音合成出现异常: {}", e.getMessage(), e);
                    error.set(e);
                    latch.countDown();
                }
            };

            // Request parameters for the synthesis call.
            SpeechSynthesisParam param = SpeechSynthesisParam.builder()
                    .apiKey(voiceConfig.getApiKey())
                    .model(voiceConfig.getVoice().getModel())
                    .voice(finalVoice)
                    .build();

            log.info("开始语音合成 - 文本: '{}', 音色: '{}', 模型: '{}'",
                    text, finalVoice, voiceConfig.getVoice().getModel());

            SpeechSynthesizer synthesizer = new SpeechSynthesizer(param, callback);

            // NOTE(review): this task blocks on the latch inside
            // ForkJoinPool.commonPool; consider a dedicated executor (or
            // Schedulers.boundedElastic) if synthesis volume grows.
            CompletableFuture.runAsync(() -> {
                try {
                    synthesizer.call(text);
                    // Wait until onComplete/onError fires.
                    latch.await();

                    if (error.get() != null) {
                        sink.error(error.get());
                    } else if (audioBuffer.size() > 0) {
                        sink.success(audioBuffer.toByteArray());
                    } else {
                        sink.error(new RuntimeException("未收到音频数据"));
                    }
                } catch (InterruptedException e) {
                    // Restore the interrupt flag instead of swallowing it.
                    Thread.currentThread().interrupt();
                    log.error("语音合成执行异常: {}", e.getMessage(), e);
                    sink.error(e);
                } catch (Exception e) {
                    log.error("语音合成执行异常: {}", e.getMessage(), e);
                    sink.error(e);
                } finally {
                    // Always release the connection — the original leaked it when
                    // call()/await() threw before reaching close().
                    try {
                        synthesizer.getDuplexApi().close(1000, "bye");
                    } catch (Exception closeEx) {
                        log.warn("关闭语音合成连接失败: {}", closeEx.getMessage(), closeEx);
                    }
                }
            });
        });
    }

    /**
     * Convenience overload that synthesizes {@code text} with the configured
     * default voice.
     *
     * @param text text to synthesize; must be non-null and non-blank
     * @return a {@code Mono} emitting the concatenated audio bytes
     */
    public Mono<byte[]> synthesizeSpeech(String text) {
        return synthesizeSpeech(text, voiceConfig.getVoice().getDefaultVoice());
    }
}
