<template>
  <div class="voice-detector">
    <h1>语音活动检测</h1>
    
    <!-- Toggles the microphone on/off; disabled while permission/setup is pending -->
    <button @click="toggleMicrophone" :disabled="loading">
      {{ isListening ? '停止监听' : '启用麦克风' }}
    </button>
    
    <!-- Horizontal volume meter: bar width tracks the 0-100 volume value,
         turns red (via .speaking) while speech is detected -->
    <div class="volume-meter">
      <div 
        class="volume-level" 
        :style="{ width: volume + '%' }"
        :class="{ 'speaking': isSpeaking }"
      ></div>
    </div>
    
    <!-- Status line, highlighted while speech is detected -->
    <div class="status" :class="{ 'active': isSpeaking }">
      {{ statusMessage }}
    </div>
    
    <p v-if="error" class="error">{{ error }}</p>
    
    <!-- Spectrum analysis visualization -->
    <canvas ref="canvas" width="300" height="100"></canvas>
  </div>
</template>

<script setup>
import { ref, onUnmounted } from 'vue'

// --- Reactive UI state ---
const isListening = ref(false)   // true while the mic/analysis pipeline is running
const loading = ref(false)       // true while requesting permission / initializing
const volume = ref(0)            // average volume clamped to 0-100 (drives meter width)
const isSpeaking = ref(false)    // speech detected (volume > 30)
const statusMessage = ref('点击按钮开始监听')
const error = ref('')            // last error message shown to the user

// --- Web Audio plumbing (plain module-scope handles; no reactivity needed) ---
const canvas = ref(null)         // <canvas> element used for the spectrum drawing
let audioContext = null          // AudioContext, closed on stop
let analyser = null              // AnalyserNode (fftSize 256 → 128 frequency bins)
let microphone = null            // MediaStreamAudioSourceNode wrapping the mic stream
let dataArray = null             // Uint8Array reused each frame for frequency data
let animationFrameId = null      // requestAnimationFrame handle, for cancellation
// Ask the browser for microphone access with speech-friendly processing
// (noise suppression, echo cancellation, automatic gain) enabled.
// Returns the MediaStream on success; on failure, surfaces the message
// via the `error` ref and re-throws so the caller can abort setup.
const requestMicrophone = async () => {
  const constraints = {
    audio: {
      noiseSuppression: true,
      echoCancellation: true,
      autoGainControl: true
    }
  }
  try {
    const stream = await navigator.mediaDevices.getUserMedia(constraints)
    return stream
  } catch (err) {
    error.value = '无法访问麦克风: ' + err.message
    throw err
  }
}

// Acquire the microphone and wire up the Web Audio analysis pipeline:
// mic stream -> MediaStreamAudioSourceNode -> AnalyserNode, then start
// the per-frame volume/spectrum loop. On any failure the error is
// reported and everything is torn down via stopListening().
const startListening = async () => {
  try {
    loading.value = true
    statusMessage.value = '正在请求麦克风权限...'
    
    const stream = await requestMicrophone()
    statusMessage.value = '正在初始化音频分析...'
    
    // Set up the Web Audio API (webkit prefix for older Safari).
    audioContext = new (window.AudioContext || window.webkitAudioContext)()
    // Some browsers create the context in a 'suspended' state until a
    // user gesture is processed; resume defensively so analysis runs.
    if (audioContext.state === 'suspended') {
      await audioContext.resume()
    }
    
    analyser = audioContext.createAnalyser()
    analyser.fftSize = 256 // 128 frequency bins — coarse but cheap spectrum
    
    microphone = audioContext.createMediaStreamSource(stream)
    microphone.connect(analyser)
    
    // Buffer reused every frame by getByteFrequencyData().
    dataArray = new Uint8Array(analyser.frequencyBinCount)
    
    isListening.value = true
    error.value = ''
    statusMessage.value = '正在监听麦克风...'
    
    // Start the spectrum-drawing / volume-analysis loop.
    draw()
  } catch (err) {
    error.value = '发生错误: ' + err.message
    stopListening()
  } finally {
    loading.value = false
  }
}

// Tear down the audio pipeline and reset the UI state.
// Fix: the underlying MediaStream tracks are now stopped explicitly.
// Disconnecting the source node alone does NOT release the microphone —
// capture continues and the browser's recording indicator stays lit.
const stopListening = () => {
  if (animationFrameId) {
    cancelAnimationFrame(animationFrameId)
    animationFrameId = null
  }
  
  if (microphone) {
    // Release the hardware: stop every track on the captured stream.
    // MediaStreamAudioSourceNode exposes its stream via .mediaStream.
    if (microphone.mediaStream) {
      microphone.mediaStream.getTracks().forEach(track => track.stop())
    }
    microphone.disconnect()
    microphone = null
  }
  
  // Drop analysis state so a cancelled draw() cannot touch stale data.
  analyser = null
  dataArray = null
  
  if (audioContext) {
    // Guard against double-close (close() rejects on a closed context).
    if (audioContext.state !== 'closed') {
      audioContext.close()
    }
    audioContext = null
  }
  
  isListening.value = false
  volume.value = 0
  isSpeaking.value = false
  statusMessage.value = '已停止监听'
}

// Per-frame analysis loop: samples the frequency data, derives an
// average volume (clamped to 0-100 for the meter), flags speech when
// the threshold is crossed, and repaints the spectrum.
const draw = () => {
  if (!analyser || !canvas.value) return
  
  // Schedule the next frame before doing this frame's work.
  animationFrameId = requestAnimationFrame(draw)
  
  // Fill dataArray with the current frequency-domain snapshot.
  analyser.getByteFrequencyData(dataArray)
  
  // Mean of all frequency bins (each bin is 0-255).
  const total = dataArray.reduce((acc, v) => acc + v, 0)
  const average = total / dataArray.length
  
  // Clamp into the 0-100 range consumed by the volume meter width.
  volume.value = Math.min(Math.max(average, 0), 100)
  
  // Speech heuristic: average volume above 30.
  const speaking = volume.value > 30
  isSpeaking.value = speaking
  statusMessage.value = speaking ? '检测到语音输入...' : '静音状态'
  
  drawSpectrum()
}

// Paint a simple bar-chart spectrum of the current frequency data.
const drawSpectrum = () => {
  const el = canvas.value
  const ctx = el.getContext('2d')
  const { width, height } = el
  
  // Clear to a light grey background.
  ctx.fillStyle = 'rgb(200, 200, 200)'
  ctx.fillRect(0, 0, width, height)
  
  const barWidth = (width / dataArray.length) * 2.5
  
  dataArray.forEach((value, i) => {
    // Bar height proportional to bin magnitude (0-255).
    const barHeight = (value / 255) * height
    // Louder bins shade toward brighter blue.
    ctx.fillStyle = `rgb(50, 50, ${value + 100})`
    // Each bar is offset by its width plus a 1px gap.
    ctx.fillRect(i * (barWidth + 1), height - barHeight, barWidth, barHeight)
  })
}

// Flip between listening and stopped based on the current state.
const toggleMicrophone = () => {
  if (isListening.value) {
    stopListening()
  } else {
    startListening()
  }
}

// Clean up on component unmount so the mic and audio context are released.
onUnmounted(() => {
  stopListening()
})
</script>

<style scoped>
/* Centered card layout for the detector UI */
.voice-detector {
  max-width: 500px;
  margin: 0 auto;
  padding: 2rem;
  font-family: Arial, sans-serif;
  text-align: center;
}

/* Primary action button (start/stop listening) */
button {
  padding: 0.75rem 1.5rem;
  background: #4285f4;
  color: white;
  border: none;
  border-radius: 4px;
  font-size: 1rem;
  cursor: pointer;
  margin-bottom: 2rem;
  transition: background 0.2s;
}

button:hover {
  background: #3367d6;
}

/* Greyed out while permission/setup is in flight */
button:disabled {
  background: #cccccc;
  cursor: not-allowed;
}

/* Track of the horizontal volume meter */
.volume-meter {
  height: 20px;
  background: #f0f0f0;
  border-radius: 10px;
  margin-bottom: 1rem;
  overflow: hidden;
}

/* Fill bar; width is bound to the reactive volume (0-100%) */
.volume-level {
  height: 100%;
  background: #34a853;
  transition: width 0.1s, background 0.3s;
}

/* Red fill while speech is detected */
.volume-level.speaking {
  background: #ea4335;
}

/* Status text, dimmed when idle */
.status {
  font-size: 1.2rem;
  color: #666;
  margin: 1rem 0;
  opacity: 0.7;
  transition: all 0.3s;
}

/* Emphasized while speech is detected */
.status.active {
  color: #ea4335;
  font-weight: bold;
  opacity: 1;
}

.error {
  color: #ea4335;
  margin-top: 1rem;
}

/* Spectrum canvas backdrop */
canvas {
  margin-top: 20px;
  background: #f0f0f0;
  border-radius: 4px;
}
</style>