import { ref, onUnmounted } from 'vue'
import axios from 'axios'

/**
 * Voice-recognition composable: listens for a wake word via the Web Speech
 * API, records microphone audio once awakened, converts the take to MP3 and
 * uploads it to an ASR backend. Browser-only (getUserMedia, AudioContext,
 * MediaRecorder, SpeechRecognition).
 *
 * Returns reactive state (monitoring/awake flags, status text, volume level)
 * plus start/stop/toggle controls — see the `return` at the bottom.
 */
export default function useVoiceRecognition() {
    // Reactive state exposed to components
    const isMonitoring = ref(false)   // true while wake-word monitoring is active
    const statusText = ref('准备就绪') // human-readable status line ("ready")
    const isAwake = ref(false)        // true from wake-word hit until recording ends
    const awakeValue = ref('')        // wake word that fired; later the ASR response text
    const currentRecording = ref({})  // last finished take { blob, duration, timestamp }
    const volumeLevel = ref(0)        // live input volume scaled to [0, 1] for UI meters

    // Audio-processing internals (non-reactive closure state)
    let audioContext = null
    let mediaStream = null
    let analyser = null
    let mediaRecorder = null
    let silenceTimer = null           // pending timeout that ends the take on silence
    const audioChunks = []            // MediaRecorder chunks for the current take
    let isProcessingRecording = false // guards against processing a take twice
    let baselineNoise = 0.01          // ambient-noise RMS baseline (recalibrated at runtime)

    // Speech-recognition internals
    let speechRecognition = null
    let lastWakeTime = 0              // timestamp of the last wake, used for debouncing
    // Configuration
    const config = {
        silenceThreshold: 0.01, // static silence threshold (superseded when dynamicThreshold is on)
        silenceDuration: 2000,  // ms of continuous silence that ends a recording
        minRecordingDuration: 1000, // takes shorter than this are discarded (ms)
        // Homophone variants of the wake word "小声小声" as commonly produced by
        // zh-CN speech recognition. Deduplicated: the original list repeated
        // many entries (e.g. '小深小深', '小森小森', '小神小神'), which only
        // wasted regex checks per result — first match wins anyway.
        wakeWord: [
            '小声小声', '小孙小孙', '小春小春', '小三小声', '晓声晓声',
            '小深小深', '小森小森', '小神小神', '小生小生', '小申小申',
            '小慎小慎', '小沈小沈', '小身小身', '小肾小肾', '小绅小绅',
            '小伸小伸', '小参小参', '小三小三', '小车小声', '小陈小春',
            '小渗小渗', '小僧小僧', '小升小升', '小笙小笙', '小牲小牲',
            '小胜小胜', '小盛小盛', '小晨小晨', '想说小声', '小睡小睡'
        ],
        closeCommand: '关闭', // voice command that dismisses the awake overlay
        apiEndpoint: 'http://192.168.3.11:8899/ai/asr', // ASR upload endpoint
        debounceTime: 1500, // ms to ignore further wake hits after one fires
        requireFinal: false, // if true, only final (not interim) results can wake
        dynamicThreshold: true,    // derive the silence threshold from measured ambient noise
        thresholdMultiplier: 1.5,   // threshold = baselineNoise * multiplier
        noiseSamplingDuration: 2000, // ambient-noise calibration window (ms)
        fftSize: 2048              // FFT window size for the volume analyser
    }

    // Release per-take recording state: stop an active MediaRecorder, cancel
    // the pending silence timer and drop any buffered audio chunks.
    const resetAudioResources = () => {
        const recorder = mediaRecorder
        if (recorder && recorder.state !== 'inactive') {
            recorder.stop()
        }

        if (silenceTimer) {
            clearTimeout(silenceTimer)
            silenceTimer = null
        }

        audioChunks.splice(0, audioChunks.length)
        isProcessingRecording = false
    }

    // Flip wake-word monitoring: stop it when active, start it otherwise.
    const toggleMonitoring = async () => {
        return isMonitoring.value ? stopMonitoring() : startMonitoring()
    }

    // Start wake-word monitoring: wait for a user gesture, (re)build the
    // AudioContext / microphone stream / analyser, then launch speech
    // recognition. Sets isMonitoring on success; on failure records the error
    // in statusText and tears everything down.
    const startMonitoring = async () => {
        try {
            // Require a user gesture first (Chrome auto-suspends AudioContext
            // without one).
            // NOTE(review): this awaits the NEXT click/touch AFTER the call,
            // so startMonitoring blocks until the user interacts again even if
            // it was itself triggered by a click — confirm this is intended.
            if (typeof document !== 'undefined') {
                await new Promise(resolve => {
                    const handleInteraction = () => {
                        document.removeEventListener('click', handleInteraction)
                        document.removeEventListener('touchstart', handleInteraction)
                        resolve()
                    }
                    document.addEventListener('click', handleInteraction)
                    document.addEventListener('touchstart', handleInteraction)
                })
            }

            resetRecordingState()
            isAwake.value = false
            awakeValue.value = ''

            // Recreate the audio context and media stream from scratch
            if (audioContext) {
                await audioContext.close()
            }
            audioContext = new (window.AudioContext || window.webkitAudioContext)()

            if (mediaStream) {
                mediaStream.getTracks().forEach(track => track.stop())
            }

            // Request the raw microphone signal (browser DSP disabled so the
            // dynamic-threshold calibration sees the true noise floor)
            mediaStream = await navigator.mediaDevices.getUserMedia({
                audio: {
                    echoCancellation: false,
                    noiseSuppression: false,
                    autoGainControl: false
                }
            })

            // Attach an analyser used for volume / silence detection
            analyser = audioContext.createAnalyser()
            analyser.fftSize = config.fftSize
            const source = audioContext.createMediaStreamSource(mediaStream)
            source.connect(analyser)

            // Restart speech recognition for wake-word detection
            if (speechRecognition) {
                speechRecognition.stop()
            }
            initSpeechRecognition()

            isMonitoring.value = true
            statusText.value = '等待唤醒词...'

            console.log('音频监控已启动，AudioContext状态:', audioContext.state)
        } catch (err) {
            console.error('监听失败:', err)
            statusText.value = `错误: ${err.message}`
            stopMonitoring()
        }
    }


    // Dismiss the awake overlay and clear the text shown with it.
    const hideIsAwake = () => {
        awakeValue.value = ''
        isAwake.value = false
    }

    // Create and start a continuous zh-CN SpeechRecognition session that
    // watches for wake words and the close command; auto-restarts on end and
    // after errors. Throws if the browser lacks the Web Speech API.
    const initSpeechRecognition = () => {
        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
        if (!SpeechRecognition) {
            throw new Error('您的浏览器不支持语音识别API')
        }

        speechRecognition = new SpeechRecognition()
        speechRecognition.continuous = true
        speechRecognition.interimResults = true
        speechRecognition.lang = 'zh-CN'

        speechRecognition.onresult = (event) => {
            // Ignore results while not monitoring or while already awake/recording
            if (!isMonitoring.value || isAwake.value) return

            // Debounce: ignore hits that arrive too soon after the last wake
            const now = Date.now()
            if (now - lastWakeTime < config.debounceTime) return

            for (let i = event.resultIndex; i < event.results.length; i++) {
                if (config.requireFinal && !event.results[i].isFinal) continue

                const transcript = event.results[i][0].transcript.trim()
                console.log(`识别结果[${event.results[i].isFinal ? '最终' : '中间'}]:`, transcript)

                // The close command dismisses the awake overlay regardless of state
                if (transcript.includes(config.closeCommand)) {
                    hideIsAwake();
                    return
                }

                // Check every wake-word variant against this transcript
                for (const word of config.wakeWord) {
                    // Match the word bounded by start/end, whitespace or punctuation.
                    // NOTE(review): a fresh regex is built per word per result —
                    // could be precompiled once, though volume here is low.
                    const wakeWordRegex = new RegExp(`(^|[\\s\\p{P}])${word}([\\s\\p{P}]|$)`, 'u')
                    if (wakeWordRegex.test(transcript)) {
                        lastWakeTime = now
                        isAwake.value = true
                        awakeValue.value = word // remember which variant fired
                        statusText.value = `已唤醒(${word})，开始录音...`
                        startRecording()
                        return // first match wins
                    }
                }
            }
        }

        speechRecognition.onerror = (event) => {
            console.error('语音识别错误:', event.error)
            if (event.error === 'no-speech' || event.error === 'audio-capture') {
                statusText.value = '无法访问麦克风'
            } else if (event.error === 'not-allowed') {
                statusText.value = '麦克风权限被拒绝'
            }

            // Retry after a pause.
            // NOTE(review): onend below also restarts; if both fire, calling
            // start() on an already-started session throws InvalidStateError —
            // confirm the double-start cannot happen in practice.
            if (isMonitoring.value) {
                setTimeout(() => {
                    if (isMonitoring.value) {
                        speechRecognition.start()
                    }
                }, 4000)
            }
        }

        speechRecognition.onend = () => {
            // Keep the session alive for as long as monitoring is on
            if (isMonitoring.value) {
                speechRecognition.start()
            }
        }

        speechRecognition.start()
    }

    // Begin capturing microphone audio for the current wake session.
    const startRecording = () => {
        resetAudioResources()

        mediaRecorder = new MediaRecorder(mediaStream, {
            mimeType: 'audio/webm',
            audioBitsPerSecond: 128000
        })

        mediaRecorder.ondataavailable = (event) => {
            const { data } = event
            if (data.size > 0) audioChunks.push(data)
        }

        mediaRecorder.onstop = () => {
            // Process exactly once, and only after the awake state was cleared
            const alreadyHandled = isAwake.value || isProcessingRecording
            if (!alreadyHandled) {
                isProcessingRecording = true
                processRecording()
            }
        }

        // Emit a data chunk every 100 ms so short takes still flush
        mediaRecorder.start(100)
        setupSilenceDetection()
    }

    // Calibrate the ambient noise floor, then poll the analyser every
    // animation frame; after config.silenceDuration ms of uninterrupted
    // silence, stop the recording and clear the awake state.
    // Fix: removed the unused local `sampleCount` (incremented, never read).
    const setupSilenceDetection = () => {
        const dataArray = new Uint8Array(analyser.frequencyBinCount)

        // Sample ambient noise for noiseSamplingDuration ms; the median RMS
        // becomes the baseline (median is robust against transient spikes).
        const calibrateNoise = () => {
            return new Promise(resolve => {
                const samples = []
                const startTime = Date.now()

                const sampleNoise = () => {
                    analyser.getByteTimeDomainData(dataArray)
                    samples.push(calculateRMS(dataArray))

                    if (Date.now() - startTime < config.noiseSamplingDuration) {
                        requestAnimationFrame(sampleNoise)
                    } else {
                        samples.sort((a, b) => a - b)
                        baselineNoise = samples[Math.floor(samples.length / 2)]
                        console.log('环境噪音校准完成，基线值:', baselineNoise)
                        resolve()
                    }
                }

                sampleNoise()
            })
        }

        // RMS volume of a time-domain byte buffer (128 = zero amplitude).
        const calculateRMS = (array) => {
            let sum = 0
            for (let i = 0; i < array.length; i++) {
                const value = (array[i] - 128) / 128
                sum += value * value
            }
            return Math.sqrt(sum / array.length)
        }

        // Poll loop: compare current RMS against the (possibly dynamic)
        // threshold and arm/cancel the end-of-speech timer accordingly.
        const startDetection = async () => {
            await calibrateNoise()

            const checkSilence = () => {
                if (!isMonitoring.value || !isAwake.value) return

                analyser.getByteTimeDomainData(dataArray)
                const currentVolume = calculateRMS(dataArray)
                volumeLevel.value = Math.min(1, currentVolume * 5) // scaled for UI display

                // Dynamic threshold derived from the calibrated noise floor
                const threshold = config.dynamicThreshold
                    ? baselineNoise * config.thresholdMultiplier
                    : config.silenceThreshold

                const isSilent = currentVolume < threshold;

                console.log(`音量: ${currentVolume.toFixed(4)}, 阈值: ${threshold.toFixed(4)}, 静音: ${isSilent}`)

                if (isSilent) {
                    // Arm the timer once; it fires after silenceDuration ms of
                    // continuous silence and ends the take.
                    if (!silenceTimer) {
                        silenceTimer = setTimeout(() => {
                            if (isMonitoring.value && isAwake.value) {
                                stopRecording()
                                isAwake.value = false
                                statusText.value = '录音结束，处理中...'
                            }
                        }, config.silenceDuration)
                    }
                } else {
                    // Speech resumed: cancel the pending timer
                    if (silenceTimer) {
                        clearTimeout(silenceTimer)
                        silenceTimer = null
                    }
                }

                if (isMonitoring.value) {
                    requestAnimationFrame(checkSilence)
                }
            }

            checkSilence()
        }

        // Chrome suspends AudioContext until a user gesture; resume if needed
        if (audioContext.state === 'suspended') {
            audioContext.resume().then(() => {
                console.log('AudioContext已恢复')
                startDetection()
            }).catch(err => {
                console.error('恢复AudioContext失败:', err)
            })
        } else {
            startDetection()
        }
    }

    // Stop the active MediaRecorder, if one is currently recording.
    const stopRecording = () => {
        if (mediaRecorder?.state === 'recording') {
            mediaRecorder.stop()
        }
    }

    // Assemble the buffered chunks into a clip, convert it to MP3 and upload
    // it. Takes shorter than config.minRecordingDuration are discarded.
    const processRecording = async () => {
        try {
            const webmBlob = new Blob(audioChunks, { type: 'audio/webm' })
            audioChunks.length = 0

            const duration = await getAudioDuration(webmBlob)
            const tooShort = duration * 1000 < config.minRecordingDuration
            if (tooShort) {
                statusText.value = '片段过短已忽略'
                isProcessingRecording = false
                return
            }

            currentRecording.value = {
                blob: await convertToMP3(webmBlob),
                duration,
                timestamp: new Date()
            }

            statusText.value = '正在上传录音...'
            await uploadRecording(currentRecording.value)
        } catch (err) {
            console.error('处理失败:', err)
            statusText.value = `处理错误: ${err.message}`
        } finally {
            isProcessingRecording = false
        }
    }

    // POST the finished recording to the ASR backend as multipart form data;
    // shows the backend's transcript in awakeValue. Rethrows upload failures.
    const uploadRecording = async (recording) => {
        const blob = recording?.blob
        if (!blob) return
        try {
            const formData = new FormData()
            formData.append('audio', blob, `recording_${Date.now()}.mp3`)
            formData.append('userId', '1')
            formData.append('sessionId', '2')

            const response = await axios.post(config.apiEndpoint, formData, {
                headers: {
                    'Content-Type': 'multipart/form-data'
                }
            })

            awakeValue.value = response.data.data || '无返回内容'
            statusText.value = '上传完成，等待唤醒...'
        } catch (err) {
            console.error('上传失败:', err)
            statusText.value = `上传错误: ${err.message}`
            throw err
        }
    }

    // Clear all per-recording state plus the displayed volume meter.
    const resetRecordingState = () => {
        resetAudioResources()
        volumeLevel.value = 0
        currentRecording.value = {}
    }

    // Tear down speech recognition, the recorder, the microphone stream and
    // the audio context, then reset all reactive flags.
    const stopMonitoring = () => {
        speechRecognition?.stop()
        stopRecording()
        mediaStream?.getTracks().forEach(track => track.stop())
        audioContext?.close()
        isMonitoring.value = false
        isAwake.value = false
        statusText.value = '监听已停止'
        volumeLevel.value = 0
    }

    // Resolve the duration (seconds) of an audio blob; resolves 0 on decode
    // failure rather than rejecting.
    // NOTE(review): Chrome can report Infinity for MediaRecorder webm blobs
    // that lack a duration header — callers should tolerate that; confirm.
    const getAudioDuration = (blob) => {
        return new Promise((resolve) => {
            const audio = new Audio()
            const url = URL.createObjectURL(blob)
            audio.src = url
            audio.onloadedmetadata = () => {
                URL.revokeObjectURL(url)
                resolve(audio.duration)
            }
            audio.onerror = () => {
                // Fix: the original leaked the object URL on the error path —
                // revoke it here too before resolving the 0 fallback.
                URL.revokeObjectURL(url)
                resolve(0)
            }
        })
    }

    // Decode a webm blob and re-encode it as MP3 with lamejs (mono, 128 kbps).
    // Rejects (throws) if decoding or encoding fails.
    // Fixes vs. original: plain async instead of the `new Promise(async ...)`
    // anti-pattern; PCM samples clamped on BOTH ends (values < -1 previously
    // overflowed Int16); the encoder now receives the buffer's real sample
    // rate instead of a hardcoded 44100, which pitch-shifted 48 kHz audio.
    const convertToMP3 = async (webmBlob) => {
        try {
            const lamejs = await loadLameJS()
            const { Mp3Encoder } = lamejs

            const arrayBuffer = await webmBlob.arrayBuffer()
            const audioBuffer = await audioContext.decodeAudioData(arrayBuffer)

            // NOTE(review): for stereo input this encodes the SECOND channel
            // (kept from the original) — confirm that is intentional.
            const channel = audioBuffer.numberOfChannels > 1 ? 1 : 0
            const float32Array = audioBuffer.getChannelData(channel)
            const pcmData = new Int16Array(float32Array.length)

            for (let i = 0; i < float32Array.length; i++) {
                const s = Math.max(-1, Math.min(1, float32Array[i]))
                // Negative full-scale is -0x8000, positive is 0x7FFF
                pcmData[i] = s < 0 ? s * 0x8000 : s * 0x7FFF
            }

            const encoder = new Mp3Encoder(1, audioBuffer.sampleRate, 128)
            const mp3Data = []
            const sampleBlockSize = 1152 // one MPEG audio frame of samples

            for (let i = 0; i < pcmData.length; i += sampleBlockSize) {
                const chunk = pcmData.subarray(i, i + sampleBlockSize)
                const mp3Chunk = encoder.encodeBuffer(chunk)
                if (mp3Chunk.length > 0) mp3Data.push(mp3Chunk)
            }

            const lastChunk = encoder.flush()
            if (lastChunk.length > 0) mp3Data.push(lastChunk)

            return new Blob(mp3Data, { type: 'audio/mp3' })
        } catch (err) {
            console.error('MP3转换失败:', err)
            throw err
        }
    }

    // Lazily inject the lamejs encoder script; resolves the cached global
    // immediately if it was already loaded.
    const loadLameJS = () => {
        return new Promise((resolve, reject) => {
            if (window.lamejs) {
                resolve(window.lamejs)
                return
            }
            const script = document.createElement('script')
            script.src = '/lame.min.js' // encoder bundle served from public root
            script.onload = () => resolve(window.lamejs)
            script.onerror = () => reject(new Error('加载lamejs失败'))
            document.head.appendChild(script)
        })
    }

    // Release everything when the owning component unmounts
    onUnmounted(() => {
        stopMonitoring()
    })

    // Public API of the composable
    return {
        isMonitoring,
        statusText,
        isAwake,
        awakeValue,
        volumeLevel, // live input volume for UI meters
        toggleMonitoring,
        startMonitoring,
        stopMonitoring,
        hideIsAwake
    }
}