<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>SenseVoice实时语音识别</title>
    <style>
        :root {
            --primary-color: #3b82f6;
            --primary-dark: #2563eb;
            --danger-color: #ef4444;
            --danger-dark: #dc2626;
            --background-color: #f8fafc;
            --card-background: #ffffff;
            --text-primary: #1e293b;
            --text-secondary: #64748b;
            --border-color: #e2e8f0;
            --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05);
            --shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
            --radius: 8px;
            --transition: all 0.2s ease;
        }

        * {
            margin: 0;
            padding: 0;
            box-sizing: border-box;
        }

        body {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            line-height: 1.6;
            color: var(--text-primary);
            background-color: var(--background-color);
            padding: 20px;
            max-width: 1000px;
            margin: 0 auto;
        }

        .app-container {
            background-color: var(--card-background);
            border-radius: var(--radius);
            box-shadow: var(--shadow);
            padding: 2rem;
            margin-top: 2rem;
        }

        .app-header {
            text-align: center;
            margin-bottom: 2rem;
        }

        .app-header h1 {
            color: var(--primary-color);
            margin-bottom: 0.5rem;
            font-size: 1.8rem;
        }

        .app-header p {
            color: var(--text-secondary);
            font-size: 0.95rem;
        }

        .controls {
            display: flex;
            flex-wrap: wrap;
            gap: 1rem;
            align-items: center;
            margin-bottom: 1.5rem;
            padding-bottom: 1.5rem;
            border-bottom: 1px solid var(--border-color);
        }

        #recordButton {
            background-color: var(--primary-color);
            color: white;
            border: none;
            padding: 0.75rem 1.5rem;
            border-radius: var(--radius);
            font-size: 1rem;
            font-weight: 600;
            cursor: pointer;
            transition: var(--transition);
            display: flex;
            align-items: center;
            gap: 0.5rem;
        }

        #recordButton:hover {
            background-color: var(--primary-dark);
            transform: translateY(-2px);
        }

        #recordButton.recording {
            background-color: var(--danger-color);
        }

        #recordButton.recording:hover {
            background-color: var(--danger-dark);
        }

        #recordButton svg {
            width: 18px;
            height: 18px;
        }

        .settings-group {
            display: flex;
            flex-wrap: wrap;
            gap: 1.5rem;
            align-items: center;
            margin-left: auto;
        }

        .form-group {
            display: flex;
            align-items: center;
            gap: 0.5rem;
        }

        label {
            color: var(--text-secondary);
            font-size: 0.9rem;
            cursor: pointer;
        }

        #lang {
            padding: 0.5rem 0.75rem;
            border: 1px solid var(--border-color);
            border-radius: var(--radius);
            font-size: 0.9rem;
            transition: var(--transition);
        }

        #lang:focus {
            outline: none;
            border-color: var(--primary-color);
            box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1);
        }

        .result-section {
            margin-top: 1.5rem;
        }

        .result-section h2 {
            font-size: 1.2rem;
            margin-bottom: 1rem;
            color: var(--text-primary);
            display: flex;
            align-items: center;
            gap: 0.5rem;
        }

        .result-section h2 svg {
            width: 18px;
            height: 18px;
            color: var(--primary-color);
        }

        #transcriptionResult {
            white-space: pre-wrap;
            background-color: #f8fafc;
            padding: 1.25rem;
            border: 1px solid var(--border-color);
            border-radius: var(--radius);
            font-family: 'Consolas', 'Monaco', monospace;
            min-height: 150px;
            max-height: 300px;
            overflow-y: auto;
            font-size: 0.95rem;
            line-height: 1.6;
            transition: var(--transition);
        }

        #transcriptionResult:empty:before {
            content: "转录内容将显示在此处…";
            color: var(--text-secondary);
            font-style: italic;
        }

        @media (max-width: 640px) {
            .app-container {
                padding: 1.5rem;
            }

            .controls {
                flex-direction: column;
                align-items: stretch;
            }

            .settings-group {
                margin-left: 0;
                width: 100%;
            }

            #recordButton {
                width: 100%;
                justify-content: center;
            }
        }
    </style>
</head>
<body>
    <div class="app-container">
        <div class="app-header">
            <h1>流式SenseVoice 实时语音识别</h1>
            <p>使用 SenseVoice+WebSocket 技术进行实时音频转录</p>
        </div>

        <div class="controls">
            <button id="recordButton">
                <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 18h.01M7 21h10a2 2 0 002-2V5a2 2 0 00-2-2H7a2 2 0 00-2 2v14a2 2 0 002 2z" />
                </svg>
                开始录制
            </button>

            <div class="settings-group">
                <div class="form-group">
                    <label for="lang">语言:</label>
                    <input id="lang" type="text" value="auto">
                </div>

                <div class="form-group">
                    <input type="checkbox" id="speakerVerification">
                    <label for="speakerVerification">是否开启说话人验证（需提前上传说话人音频）</label>
                </div>
            </div>
        </div>

        <div class="result-section">
            <h2>
                <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 12h6m-6 4h6m2 5H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z" />
                </svg>
                转录结果
            </h2>
            <div id="transcriptionResult" aria-live="polite"></div>
        </div>
    </div>

    <script>
        // --- DOM references and recording-session state ---
        var recordButton = document.getElementById('recordButton');
        var transcriptionResult = document.getElementById('transcriptionResult');

        // Legacy getUserMedia shim (WebKit-prefixed fallback).
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;

        var ws = null;           // active WebSocket connection, null while idle
        var record = null;       // Recorder instance, set once mic access is granted
        var timeInte = null;     // interval id for the periodic audio-send loop
        var isRecording = false; // toggled by the record button

        // The record button flips between starting and stopping a session.
        recordButton.onclick = function () {
            isRecording ? stopRecording() : startRecording();
        };

        /**
         * Opens the transcription WebSocket and starts streaming microphone
         * audio to it every 500 ms. Updates the button to its "recording"
         * state. Side effects: sets `ws`, `timeInte`, `isRecording`.
         */
        function startRecording() {
            console.log('Start Recording');

            // Guard: microphone access may still be pending or was denied,
            // in which case `record` was never initialised — bail out here
            // instead of crashing inside ws.onopen.
            if (!record) {
                alert('麦克风尚未就绪，请允许浏览器访问麦克风后重试');
                return;
            }

            var speakerVerificationCheckbox = document.getElementById('speakerVerification');
            var sv = speakerVerificationCheckbox.checked ? 1 : 0;
            var lang = document.getElementById('lang').value;

            // Build the query string (language hint / speaker-verification flag).
            var queryParams = [];
            if (lang) {
                // encodeURIComponent: `lang` is free-form user input.
                queryParams.push(`lang=${encodeURIComponent(lang)}`);
            }
            if (sv) {
                queryParams.push('sv=1');
            }
            var queryString = queryParams.length > 0 ? `?${queryParams.join('&')}` : '';

            // ws = new WebSocket(`wss://your_wss_server_address/ws/transcribe${queryString}`);
            ws = new WebSocket(`ws://127.0.0.1:27000/ws/transcribe${queryString}`);
            ws.binaryType = 'arraybuffer';

            ws.onopen = function () {
                console.log('WebSocket connection established');
                record.start();
                // Every 500 ms, ship the PCM collected so far and reset the
                // buffer. (The previous FileReader round-trip existed only to
                // dump the raw bytes to the console; a Blob can be sent
                // directly with ws.send.)
                timeInte = setInterval(function () {
                    if (ws && ws.readyState === WebSocket.OPEN) {
                        var audioBlob = record.getBlob();
                        console.log('Blob size: ', audioBlob.size);
                        ws.send(audioBlob);
                        record.clear();
                    }
                }, 500);
            };

            ws.onmessage = function (evt) {
                console.log('Received message: ' + evt.data);
                try {
                    // `var` added: this was previously an implicit global.
                    var resJson = JSON.parse(evt.data);
                    // Loose equality kept on purpose: the server's `code`
                    // field type (number vs string) is not visible here.
                    if (resJson.code == 0) {
                        transcriptionResult.textContent += "\n" + (resJson.data || 'No speech recognized');
                        // Auto-scroll to bottom
                        transcriptionResult.scrollTop = transcriptionResult.scrollHeight;
                    }
                } catch (e) {
                    // Non-JSON payload: show it verbatim.
                    console.error('Failed to parse response data', e);
                    transcriptionResult.textContent += "\n" + evt.data;
                    // Auto-scroll to bottom
                    transcriptionResult.scrollTop = transcriptionResult.scrollHeight;
                }
            };

            ws.onclose = function () {
                console.log('WebSocket connection closed');
            };

            ws.onerror = function (error) {
                // Pass the event object separately — string concatenation
                // would only print "[object Event]".
                console.error('WebSocket error:', error);
            };

            // Label kept in Chinese for consistency with the initial markup
            // (previously flipped to English "Stop Recording" mid-session).
            recordButton.innerHTML = `
                <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12" />
                </svg>
                停止录制
            `;
            recordButton.classList.add("recording");
            isRecording = true;
        }

        /**
         * Closes the WebSocket, stops audio capture and the send loop, and
         * restores the button to its idle state.
         *
         * Fix: capture and the interval are now torn down even when `ws` is
         * null (previously they leaked if the socket never opened), and the
         * stale socket reference is dropped.
         */
        function stopRecording() {
            console.log('Stop Recording');
            if (ws) {
                ws.close();
                ws = null;
            }
            if (record) {
                record.stop();
            }
            if (timeInte) {
                clearInterval(timeInte);
                timeInte = null;
            }
            // Label kept in Chinese for consistency with the initial markup
            // (previously flipped to English "Start Recording").
            recordButton.innerHTML = `
                <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 18h.01M7 21h10a2 2 0 002-2V5a2 2 0 00-2-2H7a2 2 0 00-2 2v14a2 2 0 002 2z" />
                </svg>
                开始录制
            `;
            recordButton.classList.remove("recording");
            isRecording = false;
        }

        // Stores the shared Recorder instance once microphone access is granted.
        function init(rec) {
            record = rec;
        }

        // Acquire microphone access. Prefer the standard promise-based
        // navigator.mediaDevices.getUserMedia; fall back to the legacy
        // (deprecated) callback API shimmed above for older browsers.
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            navigator.mediaDevices.getUserMedia({ audio: true })
                .then(function (mediaStream) {
                    init(new Recorder(mediaStream));
                })
                .catch(function (error) {
                    console.log(error);
                });
        } else if (navigator.getUserMedia) {
            navigator.getUserMedia(
                { audio: true },
                function (mediaStream) {
                    init(new Recorder(mediaStream));
                },
                function (error) {
                    console.log(error);
                }
            );
        } else {
            alert('Your browser does not support audio input');
        }

        /**
         * Recorder: captures microphone audio from `stream`, downsamples it
         * to 16 kHz mono, and exposes the accumulated samples as 16-bit
         * little-endian PCM blobs via getBlob().
         *
         * NOTE(review): built on the deprecated ScriptProcessorNode;
         * migrating to AudioWorklet is the long-term fix but out of scope.
         */
        var Recorder = function(stream) {
            var sampleBits = 16;           // output sample width in bits
            var outputSampleRate = 16000;  // target rate expected by the server
            var channelCount = 1;          // mono capture
            var context = new AudioContext();
            // BUG FIX: the input rate was hard-coded to 48000, but the real
            // AudioContext rate is device-dependent (often 44100). Using the
            // actual rate keeps the resampled audio at the correct speed.
            var inputSampleRate = context.sampleRate;
            var audioInput = context.createMediaStreamSource(stream);
            var recorder = context.createScriptProcessor(4096, channelCount, channelCount);

            // Accumulates raw Float32 chunks between sends.
            var audioData = {
                size: 0,
                buffer: [],
                inputSampleRate: inputSampleRate,
                inputSampleBits: sampleBits,
                clear: function() {
                    this.buffer = [];
                    this.size = 0;
                },
                input: function(data) {
                    // Copy: the engine reuses the underlying channel buffer.
                    this.buffer.push(new Float32Array(data));
                    this.size += data.length;
                },
                // Flattens the buffered float samples and converts them to
                // 16-bit signed little-endian PCM.
                encodePCM: function() {
                    var bytes = new Float32Array(this.size);
                    var offset = 0;
                    for (var i = 0; i < this.buffer.length; i++) {
                        bytes.set(this.buffer[i], offset);
                        offset += this.buffer[i].length;
                    }
                    var dataLength = bytes.length * (sampleBits / 8);
                    var buffer = new ArrayBuffer(dataLength);
                    var data = new DataView(buffer);
                    offset = 0;
                    for (var i = 0; i < bytes.length; i++, offset += 2) {
                        // Clamp to [-1, 1] then scale to the int16 range.
                        var s = Math.max(-1, Math.min(1, bytes[i]));
                        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
                    }
                    return new Blob([data], { type: 'audio/pcm' });
                }
            };

            this.start = function() {
                audioInput.connect(recorder);
                recorder.connect(context.destination);
            };

            this.stop = function() {
                recorder.disconnect();
            };

            this.getBlob = function() {
                return audioData.encodePCM();
            };

            this.clear = function() {
                audioData.clear();
            };

            // Naive box-filter resampler: averages each run of input samples
            // that maps onto one output sample.
            function downsampleBuffer(buffer, inputSampleRate, outputSampleRate) {
                if (outputSampleRate === inputSampleRate) {
                    return buffer;
                }
                var sampleRateRatio = inputSampleRate / outputSampleRate;
                var newLength = Math.round(buffer.length / sampleRateRatio);
                var result = new Float32Array(newLength);
                var offsetResult = 0;
                var offsetBuffer = 0;
                while (offsetResult < result.length) {
                    var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
                    var accum = 0, count = 0;
                    for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
                        accum += buffer[i];
                        count++;
                    }
                    // Guard against an empty window at the tail, which would
                    // otherwise produce NaN (0 / 0) in the PCM stream.
                    result[offsetResult] = count > 0 ? accum / count : 0;
                    offsetResult++;
                    offsetBuffer = nextOffsetBuffer;
                }
                return result;
            }

            recorder.onaudioprocess = function(e) {
                // (Per-buffer console.log removed: it fired ~12×/second.)
                var resampledData = downsampleBuffer(e.inputBuffer.getChannelData(0), inputSampleRate, outputSampleRate);
                audioData.input(resampledData);
            };
        };

    </script>
</body>
</html>
