<!DOCTYPE html>
<html lang="zh">

<head>
    <meta charset="UTF-8">
    <meta name="viewport"
        content="width=device-width, initial-scale=1.0">
    <title>和AI语音聊天</title>
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/layui/2.7.6/css/layui.css">
    <style>
        /* Mobile-first layout: cap the content width and center it. */
        body {
            max-width: 500px;
            margin: auto;
        }

        h1 {
            margin-top: 20px;
            text-align: center;
        }

        #tips {
            padding-bottom: 20px;
        }

        /* Container for the press-and-hold record button. */
        #audioPlayback {
            position: relative;
            text-align: center;
            height: 100%;
            width: 100%;
        }

        #recordButton {
            padding: 10px 20px;
            font-size: 16px;
            background-color: #3a943d;
            color: white;
            border: none;
            cursor: pointer;
            width: 90%;
            height: 48px;
            text-align: center;
            /* NOTE(review): `top` has no effect here because the element's
               position is static — confirm whether this was intended. */
            top: 80%;
            border-radius: 5px;
        }

        /* Greyed-out look while a recording is in progress. */
        #recordButton.recording {
            background-color: #a9a9a9;
        }

        /* Prevent long-press text selection on the record button. */
        .noselect {
            -webkit-user-select: none;
            /* Chrome/Safari */
            -moz-user-select: none;
            /* Firefox */
            -ms-user-select: none;
            /* Internet Explorer/Edge */
            user-select: none;
            /* Non-prefixed version, currently supported by most browsers */
        }

        #textSelect {
            display: block;
            /* render the select as a block-level element */
            width: 90%;
            height: 28px;
            margin: 10px auto;
            box-sizing: border-box;
            /* include padding and border in the width calculation */
        }

        #textInput {
            display: block;
            /* render the textarea as a block-level element */
            width: 90%;
            margin: 10px auto;
            padding: 5px;
            box-sizing: border-box;
            /* include padding and border in the width calculation */
        }

        #submitButton {
            display: block;
            /* render the button as a block-level element */
            width: 90%;
            height: 32px;
            margin: 10px auto;
            box-sizing: border-box;
            /* include padding and border in the width calculation */
            background-color: #6dc470;
            color: white;
            border: 0px;
            border-radius: 5px;
        }

        /* Scrollable transcript area for dialogue messages. */
        #messageBox {
            width: 90%;
            height: 200px;
            /* fixed height */
            border: 1px solid #ccc;
            padding: 10px;
            overflow-y: auto;
            /* enable vertical scrolling */
            margin: 20px auto;
            /* center horizontally */
            box-sizing: border-box;
            /* include padding and border in the width calculation */
            background-color: #f9f9f9;
            /* background color */
        }
    </style>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/layui/2.7.6/layui.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/recorderjs/0.1.0/recorder.js"
        integrity="sha512-zSq4Vvm00k8M01OLF/SmwKryVpA7YVXIbEFHU1rvNw3pgH50SjL6O4nDbB65V76YKWmr3rPABOXJ+uz+Z3BEmw=="
        crossorigin="anonymous" referrerpolicy="no-referrer"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.8.1/socket.io.js"
        integrity="sha512-8BHxHDLsOHx+flIrQ0DrZcea7MkHqRU5GbTHmbdzMRnAaoCIkZ97PqZcXJkKZckMMhqfoeaJE+DNUVuyoQsO3Q=="
        crossorigin="anonymous" referrerpolicy="no-referrer"></script>
</head>

<body>
    <h1>和AI语音聊天</h1>
    <select id="textSelect">
        <option value="" disabled selected>请选择一个AI身份</option>
        <!-- <option value="请将我发给你的信息，翻译为英语讲出来。">英语口语教师</option>
        <option value="请将我发给你的信息，翻译为日语讲出来。">日语口语教师</option> -->
    </select>
    <textarea id="textInput" rows="5" placeholder=""></textarea>
    <button id="submitButton">更新AI的身份定位</button>
    <div id="messageBox"></div>
    <div id="audioPlayback">
        <!-- <p id="tips">按住按钮开始说话...</p> -->
        <button id="recordButton" class="noselect">按住 说话</button>
    </div>

    <!-- 脚本内容 -->
    <script>
        let recorder;       // Recorder.js instance for the current take
        let audioStream;    // microphone MediaStream while recording
        let audioContext;   // shared Web Audio context (created on first recording)
        let starTime; // recording start timestamp; NOTE(review): typo for "startTime", kept to avoid touching other code
        let isStop; // whether the record button has been released
        let isPlaying = false; // whether a reply audio buffer is currently playing
        const audioQueue = []; // queued WAV ArrayBuffers awaiting playback
        const messageBox = document.getElementById('messageBox');
        const textSelect = document.getElementById('textSelect');
        const textInput = document.getElementById('textInput');
        const submitButton = document.getElementById('submitButton');
        const recordButton = document.getElementById('recordButton');

        const socket = io(); // Socket.IO connection back to the serving host

        // On page load: request microphone permission up front, show the
        // currently configured AI role in the input placeholder, and load the
        // role dropdown options.
        window.addEventListener('load', async () => {
            try {
                await navigator.mediaDevices.getUserMedia({ audio: true });
                console.log('麦克风权限已获取');
            } catch (error) {
                console.error('获取麦克风权限失败:', error);
            }
            // localStorage.getItem returns null when the key is missing; avoid
            // rendering the literal string "null" in the placeholder.
            const savedRole = localStorage.getItem('instructions_text');
            textInput.placeholder = "请从上面下拉框中选择一个AI角色，或者手动输入一个AI角色的描述。当前角色是：\n" + (savedRole || '（尚未设置）');
            await loadAiOptions();
        });

        // Loads the AI role list. Prefers the cached copy in localStorage
        // ('localairoleoption'); if the cache is absent, empty, or corrupt,
        // fetches the list from the server and refreshes the cache.
        async function loadAiOptions() {
            const localData = localStorage.getItem('localairoleoption');
            if (localData && localData !== '[]') {
                try {
                    console.log('从本地localStorage加载AI角色列表。');
                    const options = JSON.parse(localData);
                    constructAiSelect(options);
                    return;
                } catch (error) {
                    // Corrupt cached JSON: discard it and fall through to the server fetch.
                    console.error('本地AI角色数据解析失败，改为从服务端加载。', error);
                    localStorage.removeItem('localairoleoption');
                }
            }
            try {
                console.log('从服务端加载AI角色内容。');
                const response = await fetch('/airoleoptions');
                if (!response.ok) {
                    throw new Error('Network response was not ok');
                }
                const options = await response.json();
                localStorage.setItem('localairoleoption', JSON.stringify(options));
                constructAiSelect(options);
            } catch (error) {
                console.error('Error loading options:', error);
            }
        }

        // Rebuilds the AI-role <select> from an array of {label, value} objects,
        // keeping the disabled "please choose" placeholder as the first entry.
        function constructAiSelect(options) {
            textSelect.innerHTML = '<option value="" disabled selected>请选择一个AI身份</option>';
            for (const { label, value } of options) {
                const item = document.createElement('option');
                item.value = value;
                item.textContent = label;
                textSelect.appendChild(item);
            }
        }

        // When a preset AI role is picked from the dropdown, copy its full
        // description into the editable text input.
        textSelect.addEventListener('change', () => {
            const chosen = textSelect.value;
            if (!chosen) {
                return;
            }
            textInput.value = chosen;
        });

        // Handles the "update AI identity" button: persists the new role text
        // and, if it is new, prepends it to the cached role list and rebuilds
        // the dropdown.
        submitButton.addEventListener('click', async () => {
            const instructions_text = textInput.value.trim();
            if (instructions_text === '') {
                alert('请输入内容再提交！');
                return;
            }
            localStorage.setItem('instructions_text', instructions_text);
            appendMessage("AI身份已经更新为：" + instructions_text);
            // Compare against the cached list. The cache may be absent or
            // corrupt — original code called JSON.parse(null) and then
            // null.some(...), throwing a TypeError; fall back to [] instead.
            let oldoptions;
            try {
                oldoptions = JSON.parse(localStorage.getItem('localairoleoption')) || [];
            } catch (error) {
                oldoptions = [];
            }
            const exists = oldoptions.some(option => option.value === instructions_text);
            if (!exists) {
                // New role: store it so it can be reused in later sessions.
                const newOption = {
                    label: instructions_text.substring(0, 8), // first 8 chars as the dropdown label
                    value: instructions_text
                };
                oldoptions.unshift(newOption); // newest first
                localStorage.setItem('localairoleoption', JSON.stringify(oldoptions));
                constructAiSelect(oldoptions);
            }
        });

        // Appends one line of text to the message box and keeps the box
        // scrolled to the newest entry.
        function appendMessage(message) {
            const line = document.createElement('div');
            line.textContent = message;
            messageBox.appendChild(line);
            messageBox.scrollTop = messageBox.scrollHeight;
        }

        // Updates the record button's label and appearance (and the isStop
        // flag) for a recording lifecycle state change.
        function recordStateChange(state) {
            switch (state) {
                case 'recording': // button pressed, waiting for the recorder to start
                    isStop = false;
                    recordButton.classList.add('recording');
                    recordButton.innerText = '等待网页录音开启中';
                    break;
                case 'start': // recorder is live
                    recordButton.classList.add('recording');
                    recordButton.innerText = '松开 发送';
                    break;
                case 'stop': // button released
                    isStop = true;
                    recordButton.classList.remove('recording');
                    recordButton.innerText = '按住 说话';
                    break;
            }
        }

        // Starts a recording take: clears the playback queue, ensures the
        // AudioContext exists and is running, then opens the microphone and
        // begins capturing with Recorder.js.
        async function startRecordAudio() {
            audioQueue.length = 0;  // drop queued replies from the previous turn
            recordStateChange('recording')
            // Create the AudioContext on first use (24 kHz to match the PCM the server streams).
            if (!audioContext) {
                audioContext = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 24000 });
            }
            // Resume the context if the browser auto-suspended it.
            if (audioContext.state === 'suspended') {
                await audioContext.resume();
            }
            starTime = (+ new Date())
            // The button may already have been released while we awaited resume();
            // recordStateChange('stop') sets isStop, so bail out before opening the mic.
            if (isStop) return

            audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
            const input = audioContext.createMediaStreamSource(audioStream);
            recorder = new Recorder(input, { numChannels: 1 });
            recorder.record();
            // Switch the button to its "recording" appearance.
            recordStateChange('start')
        }

        // Stops recording. Short takes (< 1 s) are discarded; otherwise the
        // take is exported as WAV, base64-encoded and uploaded to the server.
        function stopAudioRecord() {
            recordStateChange('stop');
            recorder && recorder.stop();
            // Always release the microphone — the original code skipped this on
            // the too-short path, leaving the mic (and browser indicator) live.
            audioStream && audioStream.getTracks().forEach(track => track.stop());
            if (!starTime || ((+ new Date()) - starTime < 1000)) {
                starTime = null;
                return layer.msg('说话时间太短了...');
            }
            starTime = null;
            // mouseup/touchend can fire before recording ever started.
            if (!recorder) return;
            recorder.exportWAV(blob => {
                // Convert the Blob to base64 (strip the data-URL prefix) and upload.
                const reader = new FileReader();
                reader.onloadend = () => {
                    const base64data = reader.result.split(',')[1];
                    uploadAudio(base64data);
                };
                reader.readAsDataURL(blob);
            });
        }

        // Press-and-hold to record. preventDefault() on the touch events stops
        // the browser from also firing the synthesized mousedown/mouseup pair
        // after a touch, which would start/stop the recording twice on mobile.
        recordButton.addEventListener('touchstart', (event) => {
            event.preventDefault();
            startRecordAudio();
        });
        recordButton.addEventListener('touchend', (event) => {
            event.preventDefault();
            stopAudioRecord();
        });

        recordButton.addEventListener('mousedown', startRecordAudio);
        recordButton.addEventListener('mouseup', stopAudioRecord);

        // Uploads a base64-encoded WAV recording to the server, along with the
        // Socket.IO session id and the current AI-role instructions. The
        // server streams its reply back over the socket, so no success UI is
        // shown here; failures are only logged.
        async function uploadAudio(base64data) {
            const sid = localStorage.getItem('sid');
            const instructions_text = localStorage.getItem('instructions_text');
            try {
                const response = await fetch('/upload_audio', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({ audio: base64data, sid: sid, instructions_text: instructions_text })
                });
                if (!response.ok) {
                    console.error('音频上传失败:', response.status);
                }
            } catch (error) {
                // Network failure would otherwise surface as an unhandled rejection.
                console.error('音频上传请求出错:', error);
            }
        }



        // Receives a base64-encoded chunk of 16-bit PCM from the server,
        // wraps it in a WAV container and queues it for sequential playback.
        socket.on('audio', async (audioData) => {
            const decoded = atob(audioData); // base64 -> binary string
            const bytes = Uint8Array.from(decoded, (ch) => ch.charCodeAt(0));
            const samples = new Int16Array(bytes.buffer);
            const wavBlob = pcmToWav(samples, 24000); // server streams 24 kHz PCM
            const wavArrayBuffer = await wavBlob.arrayBuffer();
            audioQueue.push(wavArrayBuffer);
            playNextAudio(); // kick the playback chain if idle
        });

        // Stores the session id handed out by the server so later HTTP
        // requests can be correlated with this socket connection.
        socket.on('receive_sid', (data) => {
            const { sid } = data;
            console.log('从服务端获取的sid:', sid);
            localStorage.setItem('sid', sid);
        });

        // Mirrors server-generated dialogue text into the message box.
        socket.on('dialogue', async (dialogue_text) => {
            appendMessage(dialogue_text);
        });


        // Wraps raw 16-bit PCM samples in a WAV (RIFF) container.
        // pcmData: Int16Array of interleaved samples; sampleRate: in Hz;
        // numChannels: channel count, defaults to 1 (mono) to stay
        // backward-compatible with existing callers.
        // Returns a Blob of type 'audio/wav'.
        function pcmToWav(pcmData, sampleRate, numChannels = 1) {
            const bytesPerSample = 2; // 16-bit PCM
            const dataSize = pcmData.length * bytesPerSample;
            const buffer = new ArrayBuffer(44 + dataSize); // 44-byte header + samples
            const view = new DataView(buffer);
            // Writes an ASCII tag at the given byte offset.
            function writeString(str, offset) {
                for (let i = 0; i < str.length; i++) {
                    view.setUint8(offset + i, str.charCodeAt(i));
                }
            }
            // RIFF header
            writeString('RIFF', 0);
            view.setUint32(4, 36 + dataSize, true); // file size minus 8-byte RIFF preamble
            writeString('WAVE', 8);
            // fmt chunk
            writeString('fmt ', 12);
            view.setUint32(16, 16, true); // fmt chunk size
            view.setUint16(20, 1, true); // audio format: 1 = uncompressed PCM
            view.setUint16(22, numChannels, true); // channel count
            view.setUint32(24, sampleRate, true); // sample rate
            view.setUint32(28, sampleRate * numChannels * bytesPerSample, true); // byte rate
            view.setUint16(32, numChannels * bytesPerSample, true); // block align
            view.setUint16(34, 16, true); // bits per sample
            // data chunk
            writeString('data', 36);
            view.setUint32(40, dataSize, true); // data chunk size
            // PCM payload (little-endian)
            for (let i = 0; i < pcmData.length; i++) {
                view.setInt16(44 + i * bytesPerSample, pcmData[i], true);
            }
            return new Blob([view], { type: 'audio/wav' });
        }

        // Plays queued WAV buffers one at a time, in arrival order. Each
        // buffer's onended callback re-enters this function to play the next.
        async function playNextAudio() {
            if (isPlaying || audioQueue.length === 0) {
                return; // already playing, or nothing queued
            }
            // audioContext is normally created by startRecordAudio; create it
            // lazily here so queued audio never hits an undefined context.
            if (!audioContext) {
                audioContext = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 24000 });
            }
            isPlaying = true;
            const wavArrayBuffer = audioQueue.shift();
            try {
                const buffer = await audioContext.decodeAudioData(wavArrayBuffer);
                const source = audioContext.createBufferSource();
                source.buffer = buffer;
                source.connect(audioContext.destination);
                // Chain playback: when this buffer ends, try the next one.
                source.onended = () => {
                    isPlaying = false;
                    playNextAudio();
                };
                source.start(0);
            } catch (error) {
                console.error('解码音频数据失败:', error);
                isPlaying = false; // reset so one bad chunk doesn't stall the queue
                playNextAudio();
            }
        }

    </script>
</body>

</html>