<template>
    <!-- Push-to-talk: transcription runs while the mouse button is held;
         @mouseleave also stops it so the mic is not left open when the
         pointer slips off the button.
         NOTE(review): no @touchstart/@touchend handlers, so this likely
         does not work on touch devices — confirm requirements. -->
    <el-button
        ref="recordButton"
        @mousedown="startRecording"
        @mouseup="stopRecording"
        @mouseleave="stopRecording"
    >
        <el-icon>
            <Microphone/>
        </el-icon>
    </el-button>
    <!-- Live transcription text (two-way bound, so the user can edit it). -->
    <el-input v-model="result"></el-input>
</template>

<script setup>
import {ref, onMounted, onUnmounted} from "vue";
import {ElButton, ElIcon} from "element-plus";
import {Microphone} from "@element-plus/icons-vue";


// Template refs and reactive UI state for the recorder.
const recordButton = ref(null);
const result = ref("");          // accumulated transcription text shown in the input
const recording = ref(false);    // true while push-to-talk is held
const mediaRecorder = ref(null); // NOTE(review): never assigned in this file — the stop() call on it is dead code

// BUGFIX: credentials must be declared BEFORE the URL that interpolates
// TOKEN. The original order (URL first) hit the const temporal dead zone
// and threw "ReferenceError: Cannot access 'TOKEN' before initialization"
// as soon as the component was set up.
const APPKEY = "mzqyNr2RY4j8Y3sb"; // replace with your AppKey
const TOKEN = "1cbd5d8980a24541830afb3ac9377d17"; // replace with your Token
// NOTE: this shadows the global URL constructor within this module scope.
const URL = "wss://nls-gateway.cn-shanghai.aliyuncs.com/ws/v1?token=" + TOKEN;

// Module-level handles shared between startRecording/stopRecording.
let ws;              // WebSocket to the NLS gateway
let audioContext;    // 16 kHz AudioContext
let scriptProcessor; // ScriptProcessorNode that pushes PCM chunks
let audioInput;      // MediaStreamAudioSourceNode from the mic
let audioStream;     // raw getUserMedia stream (tracks stopped on teardown)

// 生成 UUID
// Generate an RFC 4122 version-4 UUID from crypto-random bytes and
// return it with the dashes stripped (32 lowercase hex characters).
function generateUUID() {
    // Same template the terse `[1e7]+-1e3+...` trick produces.
    const template = "10000000-1000-4000-8000-100000000000";
    const filled = template.replace(/[018]/g, (ch) => {
        const digit = Number(ch);
        const rand = crypto.getRandomValues(new Uint8Array(1))[0];
        // '0' -> random nibble, '1' -> random nibble, '8' -> variant bits 10xx.
        return (digit ^ (rand & (15 >> (digit / 4)))).toString(16);
    });
    return filled.replace(/-/g, '');
}

/**
 * Push-to-talk handler: opens a WebSocket to the Aliyun NLS real-time
 * transcription gateway, sends the StartTranscription handshake, then
 * streams 16 kHz 16-bit mono PCM chunks from the microphone until
 * stopRecording() tears everything down.
 */
const startRecording = async () => {
    if (recording.value) return; // ignore repeated mousedown while already held

    ws = new WebSocket(URL);
    console.log("连接上了");
    console.log("state:", ws.readyState);

    ws.onopen = () => {
        console.log("WebSocket connection opened");
        // StartTranscription handshake per the NLS SpeechTranscriber protocol.
        const startMessage = {
            header: {
                namespace: "SpeechTranscriber",
                name: "StartTranscription",
                task_id: generateUUID(),
                message_id: generateUUID(),
                appkey: APPKEY
            },
            payload: {
                format: "pcm",
                sample_rate: 16000,
                enable_intermediate_result: true,
                enable_punctuation_prediction: true,
                enable_inverse_text_normalization: true
            }
        };
        console.log("startMessage:{}", startMessage);
        ws.send(JSON.stringify(startMessage));
        console.log("发送出去了");
    };

    // BUGFIX: the original did `result.value += payload.result` on every
    // TranscriptionResultChanged event. With intermediate results enabled
    // that event carries the FULL partial text of the current sentence,
    // so appending duplicated the sentence on each refinement. Keep the
    // finished sentences in `committed` and display committed + live partial.
    let committed = result.value;
    ws.onmessage = (msg) => {
        const res = JSON.parse(msg.data);
        console.log(res);
        console.log("msg.data.header:", res.header);
        if (res.header?.status !== 20000000) return; // 20000000 == success
        if (res.header.name === "TranscriptionResultChanged") {
            result.value = committed + res.payload.result; // live partial
        } else if (res.header.name === "SentenceEnd") {
            committed += res.payload.result; // sentence is final — commit it
            result.value = committed;
        }
    };

    ws.onerror = (error) => {
        console.error("WebSocket error:", error);
    };

    ws.onclose = () => {
        console.log("WebSocket connection closed");
    };

    // The original snapshotted ws.readyState into `state` and later compared
    // it to the magic number 0; a socket just constructed is always
    // CONNECTING here, so set up the microphone pipeline directly.
    try {
        audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
        audioContext = new (window.AudioContext || window.webkitAudioContext)({
            sampleRate: 16000
        });
        audioInput = audioContext.createMediaStreamSource(audioStream);

        // 2048-frame buffer, mono in/out.
        // NOTE(review): ScriptProcessorNode is deprecated — consider
        // migrating to an AudioWorklet.
        scriptProcessor = audioContext.createScriptProcessor(2048, 1, 1);

        scriptProcessor.onaudioprocess = (event) => {
            const inputData = event.inputBuffer.getChannelData(0);
            // Convert clamped float [-1, 1] samples to 16-bit PCM.
            const pcm16 = new Int16Array(inputData.length);
            for (let i = 0; i < inputData.length; ++i) {
                pcm16[i] = Math.max(-1, Math.min(1, inputData[i])) * 0x7FFF;
            }
            // Only stream once the handshake connection is actually open.
            if (ws && ws.readyState === WebSocket.OPEN) {
                ws.send(pcm16.buffer);
                console.log('发送音频数据块');
            }
        };

        audioInput.connect(scriptProcessor);
        scriptProcessor.connect(audioContext.destination);

        // BUGFIX: only mark recording once the mic pipeline is live; the
        // original set it unconditionally, so a denied mic permission left
        // the component stuck in a phantom "recording" state.
        recording.value = true;
    } catch (e) {
        console.log('录音失败: ' + e);
        ws.close(); // don't leave a half-open session when the mic failed
    }
};

/**
 * Tear down the audio pipeline and the transcription session, then clear
 * the recording flag. Every handle is released defensively because a
 * failed startRecording() may have left some of them unset.
 */
const stopRecording = () => {
    // Disconnect the Web Audio graph first so no further chunks are produced.
    scriptProcessor?.disconnect();
    audioInput?.disconnect();
    // Release the microphone itself.
    audioStream?.getTracks().forEach((track) => track.stop());
    audioContext?.close();
    // mediaRecorder is a ref and may be null.
    mediaRecorder.value?.stop();
    // Close the gateway connection last.
    ws?.close();
    recording.value = false;
};
</script>

<style scoped>
/* NOTE(review): .result is not referenced anywhere in this component's
   template — presumably leftover from an earlier markup version that
   rendered the text in a styled element; confirm before deleting. */
.result {
    margin-top: 10px;
    font-size: 16px;
    color: #333;
}
</style>