package com.lndsnx.ai.controller;

import com.alibaba.cloud.ai.dashscope.audio.DashScopeAudioTranscriptionOptions;
import com.alibaba.cloud.ai.dashscope.audio.synthesis.SpeechSynthesisModel;
import com.alibaba.cloud.ai.dashscope.audio.synthesis.SpeechSynthesisPrompt;
import com.alibaba.cloud.ai.dashscope.audio.synthesis.SpeechSynthesisResponse;
import com.alibaba.cloud.ai.dashscope.audio.transcription.AudioTranscriptionModel;
import jakarta.annotation.Resource;
import org.springframework.ai.audio.transcription.AudioTranscriptionPrompt;
import org.springframework.ai.audio.transcription.AudioTranscriptionResponse;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.UrlResource;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Flux;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;


@RestController
@RequestMapping("/ai/audio")
public class AudioModelController {

    /** Directory where the streaming TTS endpoint writes its output file. */
    private static final String FILE_PATH = "spring-ai-dashscope-demo/src/main/resources/output";

    @Resource
    private SpeechSynthesisModel speechSynthesisModel;
    @Resource
    private AudioTranscriptionModel audioTranscriptionModel;

    // DashScope transcription model used by the blocking /stt endpoint.
    private static final String DEFAULT_MODEL_1 = "sensevoice-v1";

    // DashScope realtime transcription model used by the streaming /stream/stt endpoint.
    private static final String DEFAULT_MODEL_2 = "paraformer-realtime-v2";

    // Batch transcription model; currently unreferenced but kept for completeness.
    private static final String DEFAULT_MODEL_3 = "paraformer-v2";

    // Public sample audio file used as input for the blocking /stt endpoint.
    private static final String AUDIO_RESOURCES_URL = "https://dashscope.oss-cn-beijing.aliyuncs.com/samples/audio/paraformer/hello_world_female2.wav";


    /**
     * Text-to-speech: synthesizes {@code prompt} in one blocking call and returns
     * the resulting audio bytes as an attachment.
     *
     * @param prompt text to synthesize (defaults to a sample Chinese verse)
     * @return the synthesized audio as a downloadable binary body
     * @throws IOException declared for interface stability; synthesis errors surface as runtime exceptions
     */
    @GetMapping("/tts")
    public ResponseEntity<byte[]> call(@RequestParam(value = "prompt", defaultValue = "君不见黄河之水天上来，奔流到海不复回！", required = false) String prompt) throws IOException {
        SpeechSynthesisResponse response = speechSynthesisModel.call(new SpeechSynthesisPrompt(prompt));
        ByteBuffer byteBuffer = response.getResult().getOutput().getAudio();
        // Copy the buffer's remaining bytes into a plain array for the response body.
        byte[] audioBytes = new byte[byteBuffer.remaining()];
        byteBuffer.get(audioBytes);

        // Return the audio stream (MP3 format) as a download attachment.
        return ResponseEntity.ok()
                .contentType(MediaType.APPLICATION_OCTET_STREAM)
                .header("Content-Disposition", "attachment; filename=output.mp3")
                .body(audioBytes);
    }

    /**
     * Streaming text-to-speech: subscribes to the synthesis flux and appends each
     * audio chunk to {@code FILE_PATH}/output.mp3, blocking until the stream completes.
     *
     * @param prompt text to synthesize (defaults to a sample Chinese verse)
     * @throws IOException if the output directory/file cannot be written, the stream
     *                     fails, or the wait is interrupted
     */
    @GetMapping("/stream/tts")
    public void stream(@RequestParam(value = "prompt", defaultValue = "君不见黄河之水天上来，奔流到海不复回！", required = false) String prompt) throws IOException {
        Flux<SpeechSynthesisResponse> response = speechSynthesisModel.stream(new SpeechSynthesisPrompt(prompt));
        CountDownLatch latch = new CountDownLatch(1);
        File file = new File(FILE_PATH + "/output.mp3");
        // FileOutputStream does not create parent directories; ensure they exist first.
        File parent = file.getParentFile();
        if (parent != null && !parent.exists() && !parent.mkdirs()) {
            throw new IOException("Unable to create output directory: " + parent);
        }
        // Capture any stream error so it can be rethrown after the latch releases,
        // instead of being silently swallowed by the reactive pipeline.
        AtomicReference<Throwable> streamError = new AtomicReference<>();
        try (FileOutputStream fos = new FileOutputStream(file)) {
            // doFinally fires on complete, error, and cancel, so the latch always releases.
            response.doFinally(signal -> latch.countDown()
            ).subscribe(
                synthesisResponse -> {
                    ByteBuffer byteBuffer = synthesisResponse.getResult().getOutput().getAudio();
                    byte[] bytes = new byte[byteBuffer.remaining()];
                    byteBuffer.get(bytes);
                    try {
                        fos.write(bytes);
                    } catch (IOException e) {
                        // A throwing consumer cancels the subscription; the cause is recorded below.
                        throw new RuntimeException(e);
                    }
                },
                streamError::set
            );

            latch.await();
        } catch (InterruptedException e) {
            // Restore the interrupt status before translating to the declared exception type.
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while waiting for speech synthesis stream", e);
        }
        Throwable error = streamError.get();
        if (error != null) {
            throw new IOException("Speech synthesis stream failed", error);
        }
    }

    /**
     * Speech-to-text: transcribes the fixed sample audio at {@code AUDIO_RESOURCES_URL}
     * in one blocking call.
     *
     * @param prompt accepted for interface symmetry with the TTS endpoints; not used
     *               by the transcription request
     * @return the transcription text
     * @throws IOException if the audio URL resource is malformed
     */
    @GetMapping("/stt")
    public String callStt(@RequestParam(value = "prompt", defaultValue = "君不见黄河之水天上来，奔流到海不复回！", required = false) String prompt) throws IOException {
        AudioTranscriptionResponse response = audioTranscriptionModel.call(
                new AudioTranscriptionPrompt(
                        new UrlResource(AUDIO_RESOURCES_URL),
                        DashScopeAudioTranscriptionOptions.builder()
                                .withModel(DEFAULT_MODEL_1)
                                .build()
                )
        );

        return response.getResult().getOutput();
    }

    /**
     * Streaming speech-to-text: transcribes the local file previously produced by
     * {@link #stream(String)}, accumulating partial results until the flux completes.
     *
     * @param prompt accepted for interface symmetry with the TTS endpoints; not used
     *               by the transcription request
     * @return the concatenated transcription output
     * @throws IOException if the wait for the transcription stream is interrupted
     */
    @GetMapping("/stream/stt")
    public String streamStt(@RequestParam(value = "prompt", defaultValue = "君不见黄河之水天上来，奔流到海不复回！", required = false) String prompt) throws IOException {
        CountDownLatch latch = new CountDownLatch(1);
        StringBuilder stringBuilder = new StringBuilder();

        Flux<AudioTranscriptionResponse> response = audioTranscriptionModel
                .stream(
                        new AudioTranscriptionPrompt(
                                new FileSystemResource("spring-ai-dashscope-demo/src/main/resources/output/output.mp3"),
                                DashScopeAudioTranscriptionOptions.builder()
                                        .withModel(DEFAULT_MODEL_2)
                                        .withSampleRate(16000)
                                        .withFormat(DashScopeAudioTranscriptionOptions.AudioFormat.PCM)
                                        .withDisfluencyRemovalEnabled(false)
                                        .build()
                        )
                );

        // doFinally fires on complete, error, and cancel, so the latch always releases.
        response.doFinally(
                signal -> latch.countDown()
        ).subscribe(
                resp -> stringBuilder.append(resp.getResult().getOutput())
        );

        try {
            latch.await();
        }
        catch (InterruptedException e) {
            // Restore the interrupt status before translating to the declared exception type.
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while waiting for transcription stream", e);
        }

        // The return value is written to the HTTP response body by Spring,
        // since this is a @RestController method.
        return stringBuilder.toString();
    }
}
