<script setup>
import { ref, onUnmounted } from 'vue'
import {speechApi} from '@/apis/alHelper'
import speechStatus from '@/views/content/util/speeh/speechStatus.vue'


// Reactive recording state shared by the handlers below.
const recorder = ref(null) // MediaRecorder instance (null when idle)
const chunks = ref([]) // audio Blob chunks collected from 'dataavailable' events
const base64Data = ref('') // Base64 payload of the finished recording
const stream = ref(null) // microphone MediaStream from getUserMedia

const speechIng=ref(false) // true while the user is holding the mic button (drives the status overlay)

// 开始录音
// Start recording: request the microphone, create a MediaRecorder, and wire up
// handlers that collect audio chunks and send the finished clip to speechApi.
// Bound to @mousedown on the mic element ("press and hold to talk").
const startRecording = async () => {
  speechIng.value = true
  if (recorder.value && recorder.value.state === 'recording') {
    return // already recording — ignore repeated mousedown
  }
  try {
    // Request a mono 16 kHz stream. These constraints are hints only; the
    // browser may deliver a different configuration.
    stream.value = await navigator.mediaDevices.getUserMedia({
      audio: { sampleRate: 16000, channelCount: 1, volume: 1.0, sampleSize: 16 },
    })

    recorder.value = new MediaRecorder(stream.value)
    chunks.value = [] // reset any previous take
    base64Data.value = ''

    // Collect audio data as it becomes available.
    recorder.value.addEventListener('dataavailable', (event) => {
      if (event.data.size > 0) {
        chunks.value.push(event.data)
      }
    })

    // When recording stops, Base64-encode the clip and hand it to the API.
    recorder.value.addEventListener('stop', () => {
      // NOTE(review): MediaRecorder typically emits webm/ogg by default; the
      // 'audio/wav' label here does not transcode the data — confirm the
      // backend accepts the actual container format.
      const blob = new Blob(chunks.value, { type: 'audio/wav' })

      const reader = new FileReader()
      reader.onloadend = async () => {
        base64Data.value = reader.result.split(',')[1] // strip the "data:...;base64," prefix
        console.log(base64Data.value)
        const res = await speechApi(base64Data.value)
        // NOTE(review): the original had two byte-identical branches — both
        // code -1 and code 0 surface an *error* toast. If code 0 means
        // success, the second case should likely be type 'success'; behavior
        // preserved as-is pending confirmation of speechApi's contract.
        // (ElMessage is not imported here — presumably auto-imported; verify.)
        if (res.code === -1 || res.code === 0) {
          ElMessage({ type: 'error', message: res.data })
        }
      }
      reader.readAsDataURL(blob) // read the Blob's content as a data URL
      chunks.value = [] // free the collected chunks
    })

    recorder.value.start()
    console.log('开始录音...')
  } catch (error) {
    console.error('获取麦克风权限失败:', error)
    // Fix: release a stream that was acquired before the failure (e.g. the
    // MediaRecorder constructor throwing), and don't leave the UI stuck in
    // the "recording" state when permission is denied.
    if (stream.value) {
      stream.value.getTracks().forEach((track) => track.stop())
      stream.value = null
    }
    speechIng.value = false
  }
}

// 停止录音
// Stop an in-progress recording and release the microphone.
// Bound to @mouseup on the mic element; always clears the "talking" flag.
const stopRecording = () => {
  const rec = recorder.value
  if (rec && rec.state === 'recording') {
    rec.stop()
    console.log('停止录音...')
    const mediaStream = stream.value
    if (mediaStream) {
      // Stop every track to release the hardware indicator/resource.
      for (const track of mediaStream.getTracks()) {
        track.stop()
      }
      stream.value = null
    }
  }
  speechIng.value = false
}

// 如果正在录音，则停止录音
// Stop only when a recording is actually active — used for @mouseleave so that
// merely moving off the mic element doesn't touch recording state spuriously.
const stopRecordingIfRecording = () => {
  const rec = recorder.value
  if (rec?.state === 'recording') {
    stopRecording()
  }
}

// 组件卸载时执行的清理工作
// Safety net: release the microphone if the component is torn down while a
// stream is still open (e.g. the user navigates away mid-recording).
onUnmounted(() => {
  const mediaStream = stream.value
  if (!mediaStream) return
  for (const track of mediaStream.getTracks()) {
    track.stop()
  }
  stream.value = null
})
</script>

<template>
  <div>
    <!-- 鼠标按住 mic 元素开启录音，松开或鼠标离开元素时停止录音 -->
    <el-tooltip class="box-item" effect="dark" content="按住说话" placement="right">
      <div class="mic" @mousedown="startRecording" @mouseup="stopRecording" @mouseleave="stopRecordingIfRecording"></div>
    </el-tooltip>

    <div v-show="speechIng">
                <speechStatus></speechStatus>
            </div>
  </div>
</template>

<style lang="scss" scoped>
.mic {
  width: 40px;
  height: 40px;
  background-image: url('@/assets/语音.png'); // 请确保图片路径正确
  background-size: cover;
  background-position: center;
  cursor: pointer; // 添加指针样式，表示 mic 是可交互的
}
</style>
