<template>
    <div class="voice-controls">
        <!-- Idle state: microphone icon starts a recording session -->
        <el-tooltip v-if="!isRecording" class="item" effect="dark" content="语音输入" placement="top-start">
            <img src="@/assets/mkf.png" alt="start" @click="startRecording" />
        </el-tooltip>

        <!-- Recording state: animated waveform bars -->
        <div class="audio-waves" v-if="isRecording">
            <div class="wave" v-for="n in 5" :key="n"></div>
        </div>

        <!-- Seconds remaining before the recording auto-stops -->
        <div v-if="isRecording" class="countdown-timer">{{ formatTime(remainingTime) }}</div>
        <!-- Stop button (the redundant margin-left, previously overridden by the margin shorthand, was removed) -->
        <img v-if="isRecording" @click="stopRecording" style="height: 35px; margin: 10px 0 0 -10px;" src="@/assets/stop.png" alt="stop" />
    </div>
</template>

<script setup>
// Voice-input component: streams microphone audio to Tencent Cloud's
// realtime ASR WebSocket endpoint and emits recognized text to the parent
// via the 'speech-result' event.
import { ref, onUnmounted, defineExpose } from 'vue' // NOTE(review): defineExpose/defineEmits are compiler macros; importing defineExpose is unnecessary but harmless
import CryptoJS from 'crypto-js'
import { v4 as uuidv4 } from 'uuid'; // ES6 module import
const emit = defineEmits(['speech-result']) // carries the current recognized text (empty string = clear)
// Expose the methods the parent needs to call
// (relies on function-declaration hoisting of stopimportfun, defined below).
defineExpose({
    stopimportfun,
});
// SECURITY: hard-coded cloud credentials ship to every client that loads this
// bundle. The signed URL should be produced server-side and fetched from the
// backend instead. TODO(review): rotate these keys once moved.
const appId = '1318229055'
const secretId = 'AKIDQmwukfWFl5CA5qPp35ubO5mXmqpkWCi5'
const secretKey = 'IfuxrbKsEQQGtMdhZFKF1bzDKFyZ3MTO'

const isRecording = ref(false) // true while a capture session is active
const remainingTime = ref(60)  // countdown (seconds) until auto-stop
let socket = null       // WebSocket to the ASR service
let timer = null        // countdown interval handle
let audioContext = null // Web Audio context (16 kHz)
let processor = null    // ScriptProcessorNode pushing PCM frames
let input = null        // MediaStreamSource wrapping the microphone stream
let stream = null       // raw getUserMedia stream
let lastResult = '' // stores the previous partial recognition result
let finalResult = '' // stores the final recognition result

/**
 * Build the signed wss:// URL for Tencent Cloud realtime ASR.
 * The signature is Base64(HMAC-SHA1(host + path + sorted query, secretKey)),
 * URL-encoded and appended as the `signature` query parameter.
 * @returns {string} fully signed WebSocket URL
 */
function generateSignedUrl() {
    const timestamp = Math.floor(Date.now() / 1000)

    // Request parameters; keys must be sorted alphabetically before signing.
    const params = {
        engine_model_type: '16k_zh', // 16 kHz Mandarin model
        voice_format: 1,             // raw PCM
        needvad: 1,
        vad_silence_time: 800,
        filter_punc: 1,
        filter_dirty: 1,
        filter_modal: 1,
        expired: timestamp + 3600,   // signature valid for one hour
        nonce: Math.floor(Math.random() * 1000000),
        secretid: secretId,
        timestamp,
        voice_id: uuidv4(),
    }

    const query = Object.keys(params)
        .sort()
        .map((key) => `${key}=${params[key]}`)
        .join('&')
    const signedPath = `asr.cloud.tencent.com/asr/v2/${appId}?${query}`
    const digest = CryptoJS.HmacSHA1(signedPath, secretKey)
    const signature = encodeURIComponent(CryptoJS.enc.Base64.stringify(digest))
    return `wss://${signedPath}&signature=${signature}`
}

/**
 * Convert Float32 PCM samples in [-1, 1] to 16-bit signed PCM bytes.
 * Samples are clamped first; negatives scale by 0x8000 and positives by
 * 0x7FFF so both ends of the range map onto the full int16 span.
 * @param {Float32Array} buffer - raw audio samples
 * @returns {Uint8Array} byte view over the converted Int16 data
 */
function convertFloat32ToInt16(buffer) {
    const pcm = new Int16Array(buffer.length)
    for (const [i, sample] of buffer.entries()) {
        const clamped = Math.min(1, Math.max(-1, sample))
        pcm[i] = clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF
    }
    return new Uint8Array(pcm.buffer)
}

/**
 * Open the ASR WebSocket, start 16 kHz microphone capture, and stream
 * noise-gated 16-bit PCM frames to the service. Partial and final
 * recognition results are forwarded to the parent via 'speech-result'.
 * Fix: getUserMedia rejection (e.g. permission denied) previously escaped
 * the async onopen handler as an unhandled rejection, leaving the socket
 * open and isRecording stuck true; JSON.parse was also unguarded.
 */
async function startRecording() {
    lastResult = ''
    finalResult = ''
    emit('speech-result', '') // tell the parent to clear any previous text
    const url = generateSignedUrl()
    socket = new WebSocket(url)

    socket.onopen = async () => {
        console.log('✅ WebSocket Connected')
        isRecording.value = true
        startCountdown()

        try {
            audioContext = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 16000 })
            stream = await navigator.mediaDevices.getUserMedia({ audio: true })
        } catch (err) {
            // Permission denied or no input device — abort the session cleanly.
            console.error('❌ 无法访问麦克风:', err)
            stopRecording()
            return
        }
        input = audioContext.createMediaStreamSource(stream)

        processor = audioContext.createScriptProcessor(4096, 1, 1)
        processor.onaudioprocess = e => {
            const inputData = e.inputBuffer.getChannelData(0)
            // Simple noise gate: zero out samples below the threshold.
            const threshold = 0.01
            const filteredData = inputData.map(sample => {
                return Math.abs(sample) < threshold ? 0 : sample;
            });

            const int16Data = convertFloat32ToInt16(filteredData)
            if (socket?.readyState === WebSocket.OPEN) {
                socket.send(int16Data)
            }
        }

        input.connect(processor)
        processor.connect(audioContext.destination)
    }

    socket.onmessage = (e) => {
        let res
        try {
            res = JSON.parse(e.data)
        } catch {
            console.warn('⚠️ 无法解析的消息:', e.data)
            return
        }
        if (res.result?.voice_text_str) {
            const currentText = res.result.voice_text_str.trim()

            // Handle final results (res.final) and genuinely new partials only.
            if (res.final || currentText !== lastResult) {
                // Skip very short results (likely noise).
                if (currentText.length > 1) {
                    // Skip results consisting only of punctuation/whitespace.
                    if (!/^[\s\p{P}]+$/u.test(currentText)) {
                        if (res.final) {
                            // Final result: send it as-is.
                            finalResult = currentText
                            emit('speech-result', finalResult)
                        } else if (currentText.length > lastResult.length) {
                            // Partial result: only forward when it grew.
                            lastResult = currentText
                            emit('speech-result', lastResult)
                        }
                    }
                }
            }
        }
    }

    socket.onerror = (err) => {
        console.error('❌ WebSocket 错误:', err)
        stopRecording()
    }

    socket.onclose = () => {
        console.log('🛑 WebSocket 关闭')
    }
}

/**
 * Stop the capture session: halt the countdown, flush the final result,
 * close the WebSocket, and release all audio resources.
 * Fix: the socket was only closed when already OPEN — stopping while it was
 * still CONNECTING left it alive, and its later onopen restarted capture.
 * Now it is closed in every non-closed state and the handle is nulled.
 */
function stopRecording() {
    isRecording.value = false
    remainingTime.value = 60
    clearInterval(timer)
    timer = null

    if (socket) {
        if (socket.readyState === WebSocket.OPEN) {
            // Tell the server the audio stream has ended.
            socket.send(JSON.stringify({ type: 'end' }))

            // Make sure the last final result reached the parent.
            if (finalResult) {
                emit('speech-result', finalResult)
            }
        }
        if (socket.readyState !== WebSocket.CLOSED) {
            socket.close()
        }
        socket = null
    }

    // Reset recognition state for the next session.
    lastResult = ''
    finalResult = ''

    if (processor) processor.disconnect()
    if (input) input.disconnect()
    if (audioContext) audioContext.close()
    if (stream) stream.getTracks().forEach(t => t.stop())

    processor = null
    input = null
    audioContext = null
    stream = null
}

// Public alias exposed via defineExpose so the parent can force-stop the
// recording session. Must remain a hoisted function declaration: it is
// referenced by the defineExpose({ stopimportfun }) call above its definition.
function stopimportfun() {
    stopRecording()
}
/**
 * Start the 60-second countdown that auto-stops the recording at zero.
 * Fix: clear any pre-existing interval first so repeated calls can never
 * leak a second ticking timer.
 */
function startCountdown() {
    clearInterval(timer) // defensive: never run two countdowns at once
    timer = setInterval(() => {
        remainingTime.value--
        if (remainingTime.value <= 0) stopRecording()
    }, 1000)
}

/**
 * Format the remaining seconds as a zero-padded two-digit string.
 * Fix: the previous `sec % 60` wrapped the initial value 60 to "00",
 * so the countdown started by displaying "00" instead of "60".
 * @param {number} sec - seconds remaining (0–60)
 * @returns {string} e.g. "60", "05", "00"
 */
function formatTime(sec) {
    return String(sec).padStart(2, '0')
}

// Release the microphone/WebSocket if the component is torn down mid-recording.
onUnmounted(() => stopRecording())
</script>

<style scoped>
/* Anchor the controls to the bottom-left of the positioned parent. */
.voice-controls {
    display: flex;
    align-items: center;
    gap: 8px;
    position: absolute;
    left: 10px;
    bottom: 10px;
}
.voice-controls img {
    width: 35px;
    height: 35px;
    cursor: pointer;
    transition: transform 0.2s;
}
.voice-controls img:hover {
    transform: scale(1.1);
}
/* Animated "equalizer" bars shown while recording. */
.audio-waves {
    display: flex;
    align-items: center;
    height: 25px;
    gap: 3px;
    margin: 0 0 0 10px;
}
.wave {
    width: 3px;
    height: 100%;
    background: #409eff;
    animation: wave 1s ease-in-out infinite;
    transform-origin: bottom;
}
@keyframes wave {
    0%,
    100% {
        transform: scaleY(0.2);
    }
    50% {
        transform: scaleY(1);
    }
}
/* Stagger each bar's animation for a rippling effect. */
.wave:nth-child(2n) {
    animation-delay: 0.1s;
}
.wave:nth-child(3n) {
    animation-delay: 0.2s;
}
.wave:nth-child(4n) {
    animation-delay: 0.3s;
}
.wave:nth-child(5n) {
    animation-delay: 0.4s;
}
.countdown-timer {
    font-size: 14px;
    color: #409eff;
    margin: 13px 0 0 0;
}
/* NOTE(review): no .result-text element exists in this template — possibly
   dead CSS or used by a parent slot; verify before removing. */
.result-text {
    font-size: 14px;
    margin-top: 10px;
    color: #333;
}
</style>
