package org.tio.showcase.test;

import com.alibaba.dashscope.audio.asr.translation.TranslationRecognizerParam;
import com.alibaba.dashscope.audio.asr.translation.TranslationRecognizerRealtime;
import com.alibaba.dashscope.audio.asr.translation.results.Translation;
import com.alibaba.dashscope.exception.NoApiKeyException;
import io.reactivex.BackpressureStrategy;
import io.reactivex.Flowable;
import io.reactivex.schedulers.Schedulers;
import org.tio.core.Tio;
import org.tio.showcase.websocket.server.ShowcaseServerConfig;
import org.tio.showcase.websocket.server.ShowcaseWsMsgHandler;
import org.tio.websocket.client.WebSocket;
import org.tio.websocket.client.WsClient;
import org.tio.websocket.client.config.WsClientConfig;
import org.tio.websocket.common.WsPacket;
import org.tio.websocket.common.WsResponse;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
import javax.sound.sampled.TargetDataLine;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;

/**
 * Demo client: captures microphone audio (16 kHz, 16-bit, mono, little-endian PCM)
 * and streams it as binary chunks over a t-io WebSocket connection.
 *
 * https://www.tiocloud.com/tio/docs/t-io/demo/tiowsc/
 *
 * @author chenck
 * @date 2025/3/11 16:45
 */
public class AudioWebSocketClient {

    /** Number of bytes captured from the microphone per emitted chunk. */
    private static final int CHUNK_SIZE = 1024;

    public static void main(String[] args) throws Exception {

        WsClient echo =
                WsClient.create(
                        "ws://localhost:9326/text?encoding=BINARY",
                        new WsClientConfig(
                                e -> System.out.println("opened"),
                                e -> {
                                    WsPacket data = e.data;
                                    String dataStr = data.getWsBodyText();
                                    System.out.println("recv: " + dataStr);
                                },
                                e -> System.out.printf("on close: %d, %s, %s\n", e.code, e.reason, e.wasClean),
                                e -> System.out.println(String.format("on error: %s", e.msg)),
                                Throwable::printStackTrace));

        WebSocket webSocket = echo.connect();

        // Stream microphone input as a sequence of byte chunks.
        // TODO: sentence segmentation / noise suppression / equalization are not implemented here.
        Flowable<ByteBuffer> audioFlowable = createAudioSourceWithControl(new Object(), false);

        audioFlowable
                .observeOn(Schedulers.io()) // hand chunks off to an IO thread
                .subscribe(bytes -> { // forward each audio chunk to the server
                    webSocket.send(bytes);
                });

    }


    /**
     * Creates a cold {@link Flowable} that opens the default capture device and emits
     * raw PCM audio in {@value #CHUNK_SIZE}-byte (or smaller) chunks until the
     * subscriber cancels or {@code shouldExit} is {@code true} on entry.
     *
     * @param exitFlag   retained for source compatibility; a primitive {@code boolean}
     *                   parameter cannot be changed by the caller after the call, so
     *                   external shutdown is driven by downstream cancellation instead
     * @param shouldExit when {@code true}, the stream completes immediately without
     *                   emitting any audio
     * @return a Flowable of audio chunks (each buffer contains exactly the bytes read)
     */
    private static Flowable<ByteBuffer> createAudioSourceWithControl(Object exitFlag, boolean shouldExit) {
        // Create a Flowable<ByteBuffer> for streaming audio data
        return Flowable.create(
                emitter -> {
                    TargetDataLine targetDataLine = null;
                    try {
                        // 16 kHz, 16-bit, mono, signed, little-endian PCM
                        AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);

                        // Acquire and start the capture device
                        targetDataLine = AudioSystem.getTargetDataLine(audioFormat);
                        targetDataLine.open(audioFormat);
                        targetDataLine.start();

                        // Release the capture device as soon as the subscriber cancels,
                        // which also unblocks a pending read().
                        final TargetDataLine line = targetDataLine;
                        emitter.setCancellable(() -> {
                            line.stop();
                            line.close();
                        });

                        byte[] chunk = new byte[CHUNK_SIZE];

                        // BUG FIX: the original loop (and its synchronized block) tested
                        // the pass-by-value 'shouldExit' parameter, which can never change
                        // after the call — the stream could never terminate. We now honour
                        // downstream cancellation via emitter.isCancelled().
                        while (!shouldExit && !emitter.isCancelled()) {
                            int read = line.read(chunk, 0, chunk.length);
                            if (read > 0) {
                                // Emit exactly the bytes read; the original emitted the
                                // full 1024-byte backing array with only limit() set,
                                // exposing stale trailing data to array() consumers.
                                ByteBuffer out = ByteBuffer.allocate(read);
                                out.put(chunk, 0, read);
                                out.flip();
                                emitter.onNext(out);
                                Thread.sleep(20); // Small delay to control CPU usage
                            }
                        }
                        emitter.onComplete();
                    } catch (InterruptedException ie) {
                        // Restore the interrupt status before signalling the error.
                        Thread.currentThread().interrupt();
                        emitter.tryOnError(ie);
                    } catch (Exception e) {
                        // BUG FIX: the original called System.exit(0) here, killing the
                        // whole JVM on any capture error; propagate downstream instead.
                        emitter.tryOnError(e);
                    } finally {
                        // Always release the device (close() is safe to call twice).
                        if (targetDataLine != null) {
                            targetDataLine.stop();
                            targetDataLine.close();
                        }
                    }
                },
                BackpressureStrategy.BUFFER);
    }
}
