<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>音频输入和录音</title>
</head>
<body>
<div id="app">
    <h1>音频输入和录音</h1>
    <div id="recordingStatus">
        <p id="statusText">正在检测说话...</p>
        <div id="volumeBar" style="height: 10px; width: 100%; background-color: blue; transform: scaleX(0); transform-origin: left;"></div>
    </div>
    <audio id="audioPlayer" controls style="display: none;"></audio>
    <div id="volumeLevel"></div>
</div>

<script>
    // Actions ("words") currently recognized on this page; reassigned with the
    // server's updated list after each successful upload.
    let wordList = ['ai设计', '自主选型'];
    console.log("当前页面你可以有行为：", wordList.join());
    // Wire up audio capture once the DOM is ready: monitor microphone volume,
    // auto-start recording when the user speaks, auto-stop after a quiet
    // period (waitTime ms), then upload the clip and refresh wordList from
    // the server's response.
    document.addEventListener('DOMContentLoaded', async function () {
        const statusText = document.getElementById('statusText');
        const volumeBar = document.getElementById('volumeBar');
        const audioPlayer = document.getElementById('audioPlayer');
        const volumeLevelDisplay = document.getElementById('volumeLevel');
        let audioContext = null;
        let mediaRecorder = null;
        let audioChunks = [];
        let audioStream = null;
        let isRecording = false;
        let lastSoundTime = 0;
        let lastObjectUrl = null;         // previous blob URL; revoked before creating a new one
        const maxVolumeThreshold = 0.025; // start recording when volume exceeds this value
        const waitTime = 2000;            // ms below threshold before stopping + uploading

        // Request the microphone, build a volume analyser and a MediaRecorder,
        // then poll the volume once per animation frame.
        async function initRecording() {
            if (!navigator.mediaDevices) {
                alert('浏览器不支持音频输入设备！');
                return;
            }

            // Get the user's microphone; surface permission/device errors
            // instead of leaving an unhandled promise rejection.
            try {
                audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
            } catch (err) {
                alert('无法访问麦克风：' + err.message);
                return;
            }
            audioContext = new AudioContext();
            // Autoplay policy may create the context suspended, in which case
            // the analyser would only ever see silence. Try to resume; this
            // may still require a user gesture in some browsers.
            if (audioContext.state === 'suspended') {
                audioContext.resume().catch(() => {});
            }

            // Analyser chain used only for volume detection.
            const source = audioContext.createMediaStreamSource(audioStream);
            const analyser = audioContext.createAnalyser();
            source.connect(analyser);
            analyser.fftSize = 2048;
            const bufferLength = analyser.frequencyBinCount;
            const dataArray = new Uint8Array(bufferLength);

            // Configure the recorder; chunks accumulate until onstop fires.
            mediaRecorder = new MediaRecorder(audioStream);
            mediaRecorder.ondataavailable = e => audioChunks.push(e.data);
            mediaRecorder.onstop = handleRecordingStop;

            // Per-frame volume check driving the start/stop state machine.
            const checkAudio = () => {
                analyser.getByteTimeDomainData(dataArray);
                // Mean absolute deviation from the 128 midpoint, normalized to ~0..1.
                let sum = 0;
                for (let i = 0; i < bufferLength; i++) {
                    sum += Math.abs(dataArray[i] - 128);
                }
                const currentVolumeLevel = sum / bufferLength / 128;
                volumeLevelDisplay.textContent = `音量：${currentVolumeLevel.toFixed(2)}`;
                volumeBar.style.transform = `scaleX(${currentVolumeLevel})`;

                // Speech detected: refresh the activity timestamp and start
                // recording if not already running.
                if (currentVolumeLevel > maxVolumeThreshold) {
                    lastSoundTime = Date.now();
                    if (!isRecording) {
                        startRecording();
                    }
                }

                // Quiet for longer than waitTime: stop and upload.
                if (isRecording && Date.now() - lastSoundTime > waitTime) {
                    stopRecording();
                }

                requestAnimationFrame(checkAudio);
            };

            checkAudio();
        }

        // Begin a new recording, discarding any previously buffered chunks.
        function startRecording() {
            audioChunks = [];
            mediaRecorder.start();
            isRecording = true;
            statusText.textContent = '正在录音中...';
        }

        // Stop the recorder; handleRecordingStop runs via the onstop handler.
        function stopRecording() {
            mediaRecorder.stop();
            isRecording = false;
            statusText.textContent = '录音已停止，正在上传...';
        }

        // Assemble the recorded chunks into a blob, expose it in the audio
        // player, and ship it to the server.
        function handleRecordingStop() {
            // Tag the blob with the recorder's actual MIME type so playback
            // and the server can identify the container format.
            const audioBlob = new Blob(audioChunks, { type: mediaRecorder.mimeType || 'audio/webm' });
            // Revoke the previous object URL so blobs don't leak across recordings.
            if (lastObjectUrl) {
                URL.revokeObjectURL(lastObjectUrl);
            }
            lastObjectUrl = URL.createObjectURL(audioBlob);
            audioPlayer.src = lastObjectUrl;
            audioPlayer.style.display = 'block';
            sendAudioToServer(audioBlob);
            statusText.textContent = '上传完成，等待下一次检测...';
        }

        // POST the audio plus the current word list to /upload. The server
        // replies with a JSON array whose last element is the recognized
        // phrase; the preceding elements become the next word list.
        async function sendAudioToServer(audioBlob) {
            const formData = new FormData();
            formData.append('audio', audioBlob);
            // NOTE(review): appending an array serializes it as a comma-joined
            // string — confirm this matches what the server expects.
            formData.append('wordList', wordList);
            let xhr = new XMLHttpRequest();
            xhr.open("POST", "/upload", true);
            xhr.onreadystatechange = function () {
                if (xhr.readyState === 4 && xhr.status === 200) {
                    if (xhr.responseText !== '') {
                        let responseArray = JSON.parse(xhr.responseText);
                        // Everything except the last element is the updated word
                        // list, used as a parameter on the next upload.
                        let tempArray = responseArray.slice(0, -1);
                        wordList = tempArray;
                        console.log("你刚才说：" + responseArray[responseArray.length - 1] + "，故当前页面你可以有行为：" + tempArray.join());
                    }
                }
            };
            xhr.onerror = function () {
                console.log('网络错误');
            };
            xhr.send(formData);
        }

        await initRecording();
    });
</script>
</body>
</html>
