package com.tianji.aigc.service.impl;

import com.github.houbb.opencc4j.util.ZhConverterUtil;
import com.tianji.aigc.service.AudioService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.audio.transcription.AudioTranscriptionPrompt;
import org.springframework.ai.audio.transcription.AudioTranscriptionResponse;
import org.springframework.ai.openai.OpenAiAudioSpeechModel;
import org.springframework.ai.openai.OpenAiAudioTranscriptionModel;
import org.springframework.ai.openai.audio.speech.SpeechPrompt;
import org.springframework.ai.openai.audio.speech.SpeechResponse;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.servlet.mvc.method.annotation.ResponseBodyEmitter;
import reactor.core.Disposable;
import reactor.core.publisher.Flux;

import java.io.IOException;

@Slf4j
@Service
@RequiredArgsConstructor
public class OpenAIAudioServiceImpl implements AudioService {

    // Injected via the Lombok-generated constructor. Declaration order is
    // significant: @RequiredArgsConstructor derives the constructor parameter
    // order from it (speech model first, transcription model second).
    private final OpenAiAudioSpeechModel openAiAudioSpeechModel;
    private final OpenAiAudioTranscriptionModel openAiAudioTranscriptionModel;

    /**
     * Synthesizes speech for the given text and streams the audio bytes to the
     * client as they are produced by the model (text-to-speech).
     *
     * @param text the text to convert into speech
     * @return a {@link ResponseBodyEmitter} that asynchronously emits raw audio
     *         chunks; completed (or completed with error) when the model stream
     *         terminates
     */
    @Override
    public ResponseBodyEmitter ttsStream(String text) {
        // ResponseBodyEmitter pushes data to the client asynchronously.
        ResponseBodyEmitter emitter = new ResponseBodyEmitter();
        log.info("开始语音合成, 文本内容：{}", text);
        // Build the speech request and obtain a streaming response from the model.
        SpeechPrompt speechPrompt = new SpeechPrompt(text);
        Flux<SpeechResponse> responseStream = openAiAudioSpeechModel.stream(speechPrompt);
        // Subscribe and forward each audio chunk to the client.
        Disposable subscription = responseStream.subscribe(
                speechResponse -> {
                    byte[] output = speechResponse.getResult().getOutput();
                    try {
                        emitter.send(output);
                    } catch (IOException e) {
                        // The client has most likely disconnected; finish the
                        // emitter with the error. The onError callback below
                        // then cancels the upstream subscription.
                        emitter.completeWithError(e);
                    }
                },
                emitter::completeWithError, // propagate stream errors to the client
                emitter::complete           // close the response on normal completion
        );
        // Stop consuming the model stream when the async request fails or
        // times out — otherwise the subscription would keep pulling audio
        // chunks for a client that is no longer listening.
        emitter.onError(t -> subscription.dispose());
        emitter.onTimeout(subscription::dispose);
        return emitter;
    }

    /**
     * Transcribes the uploaded audio file to text (speech-to-text) and
     * normalizes the result from Traditional to Simplified Chinese.
     *
     * @param multipartFile the uploaded audio file to transcribe
     * @return the recognized text, converted to Simplified Chinese
     */
    @Override
    public String stt(MultipartFile multipartFile) {
        // Wrap the upload as a Resource, which the transcription API consumes.
        Resource audioResource = multipartFile.getResource();
        // Build and execute the transcription request.
        AudioTranscriptionPrompt transcriptionRequest = new AudioTranscriptionPrompt(audioResource);
        AudioTranscriptionResponse response = openAiAudioTranscriptionModel.call(transcriptionRequest);
        // Extract the recognized text.
        String output = response.getResult().getOutput();
        // The model may emit Traditional Chinese; convert to Simplified.
        return ZhConverterUtil.toSimple(output);
    }
}
