package org.tio.showcase.test;

import com.alibaba.dashscope.audio.asr.translation.TranslationRecognizerParam;
import com.alibaba.dashscope.audio.asr.translation.TranslationRecognizerRealtime;
import com.alibaba.dashscope.audio.asr.translation.results.Translation;
import com.alibaba.dashscope.exception.NoApiKeyException;
import io.reactivex.BackpressureStrategy;
import io.reactivex.Flowable;
import org.tio.showcase.websocket.server.ShowcaseWsMsgHandler;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.TargetDataLine;
import java.nio.ByteBuffer;

/**
 * https://www.tiocloud.com/tio/docs/t-io/demo/tiowsc/
 *
 * @author chenck
 * @date 2025/3/11 16:45
 */
public class AudioWebSocketClient1 {

    /**
     * Demo entry point: streams microphone audio to DashScope's realtime
     * translation recognizer and prints interim/final transcription and
     * translation results to stdout.
     *
     * <p>Requires the {@code DASHSCOPE_API_KEY} environment variable to be set.
     */
    public static void main(String[] args) throws Exception {

        // Microphone input, emitted as a stream of audio chunks.
        Flowable<ByteBuffer> audioFlowable = createAudioSourceWithControl(new Object(), false);

        // Create the recognizer.
        TranslationRecognizerRealtime translator = new TranslationRecognizerRealtime();
        // Build the request parameters; audioFrames is the Flowable<ByteBuffer> created above.
        TranslationRecognizerParam param =
                TranslationRecognizerParam.builder()
                        .model("gummy-realtime-v1")
                        .format("pcm") // 'pcm'、'wav'、'opus'、'speex'、'aac'、'amr', you
                        // can check the supported formats in the document
                        .sampleRate(16000) // supported 8000、16000
                        // SECURITY: never hard-code API keys in source control.
                        // The key previously committed here must be considered leaked and rotated.
                        .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                        .transcriptionEnabled(true)
                        .translationEnabled(true)
                        .translationLanguages(new String[]{ShowcaseWsMsgHandler.target_language})
                        .build();

        // Stream call interface for streaming audio to recognizer.
        try {
            translator
                    .streamCall(param, audioFlowable)
                    .blockingForEach(result -> {
                        if (result.getTranscriptionResult() != null) {
                            String transcriptionText = result.getTranscriptionResult().getText();
                            if (result.isSentenceEnd()) {
                                // Final (fixed) result for the sentence.
                                System.out.println("Fix: " + transcriptionText);
                                System.out.println("Stash: " + result.getTranscriptionResult().getStash());
                            } else {
                                // Interim (partial) result, may change as more audio arrives.
                                System.out.println("Temp Result:" + transcriptionText);
                            }
                        }
                        if (result.getTranslationResult() != null) {
                            Translation targetTranslation = result.getTranslationResult().getTranslation(ShowcaseWsMsgHandler.target_language);
                            if (targetTranslation != null) {
                                if (result.isSentenceEnd()) {
                                    System.out.println("Fix to " + ShowcaseWsMsgHandler.target_language + ": " + targetTranslation.getText());
                                    System.out.println("Stash to " + ShowcaseWsMsgHandler.target_language + ": " + targetTranslation.getStash());
                                } else {
                                    // BUGFIX: was getTranslation("en") — NPE whenever the
                                    // configured target language is not English. Reuse the
                                    // already-resolved, null-checked targetTranslation.
                                    System.out.println("Temp Result:" + targetTranslation.getText());
                                }
                            }
                        }
                        if (result.isSentenceEnd()) {
                            System.out.println("\tRequestId: " + result.getRequestId() + " Usage: " + result.getUsage());
                        }
                    });
        } catch (NoApiKeyException e) {
            throw new RuntimeException(e);
        }
        System.out.println("Recognition onComplete! , exit program...");
    }


    /**
     * Creates a {@code Flowable<ByteBuffer>} that captures 16 kHz / 16-bit / mono
     * PCM audio from the default microphone in 1 KiB chunks.
     *
     * <p>Capture stops when the downstream cancels the subscription (checked via
     * {@code emitter.isCancelled()}) or when {@code shouldExit} is {@code true}
     * on entry. The {@link TargetDataLine} is always stopped and closed when the
     * loop ends.
     *
     * @param exitFlag   retained for signature compatibility; not used for
     *                   signalling (a captured primitive cannot change)
     * @param shouldExit when {@code true}, completes immediately without
     *                   capturing any audio
     * @return a backpressure-buffering stream of raw PCM chunks
     */
    private static Flowable<ByteBuffer> createAudioSourceWithControl(Object exitFlag, boolean shouldExit) {
        return Flowable.create(
                emitter -> {
                    TargetDataLine targetDataLine = null;
                    try {
                        // 16 kHz, 16-bit, mono, signed, little-endian — matches
                        // the sampleRate/format configured on the recognizer.
                        AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);

                        // Acquire and start the default capture line.
                        targetDataLine = AudioSystem.getTargetDataLine(audioFormat);
                        targetDataLine.open(audioFormat);
                        targetDataLine.start();

                        ByteBuffer buffer = ByteBuffer.allocate(1024);

                        // NOTE: the original synchronized(exitFlag) re-check of
                        // shouldExit was dead code — a captured primitive can
                        // never change — so the loop could only end abnormally.
                        // Honor downstream cancellation instead.
                        while (!shouldExit && !emitter.isCancelled()) {
                            int read = targetDataLine.read(buffer.array(), 0, buffer.capacity());
                            if (read > 0) {
                                buffer.limit(read);
                                emitter.onNext(buffer);
                                buffer = ByteBuffer.allocate(1024);
                                Thread.sleep(20); // Small delay to control CPU usage
                            }
                        }
                        if (!emitter.isCancelled()) {
                            emitter.onComplete();
                        }
                    } catch (Exception e) {
                        // Propagate through the stream; do NOT System.exit() here —
                        // that killed the JVM with a success code on any audio error.
                        emitter.onError(e);
                    } finally {
                        // Always release the audio capture line (was leaked before).
                        if (targetDataLine != null) {
                            targetDataLine.stop();
                            targetDataLine.close();
                        }
                    }
                },
                BackpressureStrategy.BUFFER);
    }
}
