// Audio streaming client: captures microphone audio through an AudioWorklet
// and streams raw Float32 PCM frames to a recognition backend over WebSocket.

// Current capture context; created on "start", torn down on "stop".
let audioContext;

const ws = new WebSocket("ws://localhost:9000/ws");

ws.onopen = () => {
    console.log("WebSocket connection established");
};

ws.onmessage = (event) => {
    // Server pushes recognized text back on the same socket.
    console.log("Recognized Text:", event.data);
};

ws.onerror = (event) => {
    // Surface transport failures instead of swallowing them silently.
    console.error("WebSocket error:", event);
};

ws.onclose = () => {
    console.log("WebSocket connection closed");
};
document.getElementById("start").addEventListener("click", async () => {
    try {
        // 48 kHz capture rate. NOTE(review): presumably the backend expects
        // raw Float32 PCM at exactly this rate — confirm with the server.
        audioContext = new AudioContext({ sampleRate: 48000 });
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const source = audioContext.createMediaStreamSource(stream);

        // Load the AudioProcessor worklet module.
        await audioContext.audioWorklet.addModule("/static/audio-processor.js");

        const workletNode = new AudioWorkletNode(audioContext, "audio-processor");

        // Receive audio frames (Float32Array) posted by the AudioProcessor.
        workletNode.port.onmessage = (event) => {
            const float32Array = event.data;
            // send() throws InvalidStateError on a CONNECTING/CLOSED socket;
            // drop frames until the connection is actually open.
            if (ws.readyState === WebSocket.OPEN) {
                ws.send(float32Array.buffer); // forward raw PCM to the server
            }
        };

        source.connect(workletNode);
        // Keep the worklet in the rendered graph (mirrors original wiring).
        // NOTE(review): this also plays the mic back through the speakers —
        // consider routing through a zero-gain GainNode if feedback is unwanted.
        workletNode.connect(audioContext.destination);
        console.log("Recording started...");
    } catch (err) {
        console.error("Error starting recording:", err);
    }
});

document.getElementById("stop").addEventListener("click", () => {
    if (audioContext) {
        // AudioContext.close() returns a Promise that rejects if the context
        // is already closed; catch it so a repeated "stop" click doesn't
        // raise an unhandled rejection.
        audioContext.close().catch((err) => {
            console.error("Error closing AudioContext:", err);
        });
        // Clear the reference so further "stop" clicks are no-ops.
        audioContext = undefined;
    }
    if (ws) {
        // NOTE(review): the socket is never recreated, so pressing "start"
        // again after a stop will not stream — consider reopening it there.
        ws.close();
    }
    console.log("Recording stopped.");
});
