import { ref, computed } from 'vue'

// Tunable parameters for the voice-activity detector.
export interface VADOptions {
  silenceThreshold: number // volume cutoff; compared against the averaged byte-frequency volume (0-255 scale)
  silenceDuration: number // ms of continuous silence after an utterance before the recording is auto-sent
  minSpeechDuration: number // minimum utterance length in ms; shorter speech is ignored
  sampleRate: number // AudioContext sample rate in Hz
}

// Reactive snapshot of the detector's current activity.
export interface VADState {
  isSpeaking: boolean // true while the averaged volume is above the silence threshold
  volume: number // most recent averaged volume, rounded (byte-frequency scale, 0-255)
  speechStartTime: number | null // Date.now() when the current utterance began, or null when idle
  lastSpeechTime: number | null // Date.now() of the most recent frame that contained speech
}

/**
 * Voice-activity-detection composable.
 *
 * Monitors the volume of a `MediaStream` via an `AnalyserNode`, records with
 * `MediaRecorder` while the user is speaking, and emits the captured audio as
 * a `Blob` (through the `onSpeechEnd` callback) once `silenceDuration` ms of
 * silence follow an utterance. Utterances shorter than `minSpeechDuration`
 * are discarded.
 *
 * @param userOptions Partial configuration; omitted fields fall back to the
 *                    defaults below. Accepting `Partial<VADOptions>` is
 *                    backward compatible — a full `VADOptions` object (the
 *                    previous signature) is still valid.
 */
export function useVoiceActivityDetection(userOptions: Partial<VADOptions> = {}) {
  // Merge caller overrides onto the defaults so partial configs keep the
  // remaining defaults instead of losing them.
  const options: VADOptions = {
    silenceThreshold: 15,
    silenceDuration: 2000, // auto-send after 2s of silence
    minSpeechDuration: 500, // ignore utterances shorter than 0.5s
    sampleRate: 16000,
    ...userOptions
  }

  const state = ref<VADState>({
    isSpeaking: false,
    volume: 0,
    speechStartTime: null,
    lastSpeechTime: null
  })

  let audioContext: AudioContext | null = null
  let analyser: AnalyserNode | null = null
  let mediaStream: MediaStream | null = null
  let animationFrameId: number | null = null
  let silenceTimer: number | null = null

  // Consumer callbacks, registered via setCallbacks().
  let onSpeechStart: (() => void) | null = null
  let onSpeechEnd: ((audioData: Blob) => void) | null = null
  let onVolumeChange: ((volume: number) => void) | null = null

  // Recording machinery.
  let mediaRecorder: MediaRecorder | null = null
  let audioChunks: Blob[] = []
  // When true, the next recorder `onstop` throws the captured audio away
  // instead of emitting it (used to drop too-short utterances).
  let discardRecording = false

  /**
   * Initialize the detector against a microphone stream.
   *
   * Sets up the AudioContext/AnalyserNode pipeline and the MediaRecorder,
   * then starts the volume-detection loop.
   *
   * @returns true on success, false if any Web Audio / recorder setup failed.
   */
  const initVAD = async (stream: MediaStream): Promise<boolean> => {
    try {
      mediaStream = stream

      // Create the audio context (webkit prefix for older Safari).
      audioContext = new (window.AudioContext || (window as any).webkitAudioContext)({
        sampleRate: options.sampleRate
      })

      // Create the analyser used for volume estimation.
      analyser = audioContext.createAnalyser()
      analyser.fftSize = 2048
      analyser.smoothingTimeConstant = 0.8

      // Connect the microphone stream to the analyser.
      const source = audioContext.createMediaStreamSource(stream)
      source.connect(analyser)

      // Create the recorder. Prefer webm/opus but fall back to the browser's
      // default container when it is unsupported (e.g. Safari), instead of
      // letting the constructor throw.
      const preferredMime = 'audio/webm;codecs=opus'
      const canUsePreferred =
        typeof MediaRecorder.isTypeSupported === 'function' &&
        MediaRecorder.isTypeSupported(preferredMime)
      mediaRecorder = canUsePreferred
        ? new MediaRecorder(stream, { mimeType: preferredMime })
        : new MediaRecorder(stream)

      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          audioChunks.push(event.data)
        }
      }

      mediaRecorder.onstop = () => {
        // Emit the utterance unless it was flagged for discarding
        // (too-short speech).
        if (!discardRecording && audioChunks.length > 0) {
          const audioBlob = new Blob(audioChunks, { type: 'audio/webm' })
          onSpeechEnd?.(audioBlob)
        }
        audioChunks = []
        discardRecording = false
      }

      // Start the volume-detection loop.
      startVolumeDetection()

      console.log('VAD初始化成功')
      return true

    } catch (error) {
      console.error('VAD初始化失败:', error)
      return false
    }
  }

  /**
   * Volume-detection loop, driven by requestAnimationFrame.
   * Computes the average byte-frequency volume each frame and drives the
   * speaking/silent state transitions.
   */
  const startVolumeDetection = () => {
    if (!analyser) return

    const bufferLength = analyser.frequencyBinCount
    const dataArray = new Uint8Array(bufferLength)

    const detectVolume = () => {
      if (!analyser) return

      analyser.getByteFrequencyData(dataArray)

      // Average volume across all frequency bins (0-255 scale).
      let sum = 0
      for (let i = 0; i < bufferLength; i++) {
        sum += dataArray[i]
      }
      const averageVolume = sum / bufferLength

      // Publish the volume.
      state.value.volume = Math.round(averageVolume)
      onVolumeChange?.(state.value.volume)

      // Edge-detect speaking vs. silence.
      const currentTime = Date.now()
      const wasSpeaking = state.value.isSpeaking
      const isSpeaking = averageVolume > options.silenceThreshold

      if (isSpeaking && !wasSpeaking) {
        // Transition: silence -> speech.
        handleSpeechStart(currentTime)
      } else if (!isSpeaking && wasSpeaking) {
        // Transition: speech -> silence.
        handleSpeechPause(currentTime)
      } else if (isSpeaking) {
        // Still speaking: refresh the timestamp and keep the silence timer off.
        state.value.lastSpeechTime = currentTime
        clearSilenceTimer()
      }

      state.value.isSpeaking = isSpeaking

      // Schedule the next frame.
      animationFrameId = requestAnimationFrame(detectVolume)
    }

    detectVolume()
  }

  /**
   * Handle the silence -> speech transition: mark the utterance start and
   * begin recording (if not already recording).
   */
  const handleSpeechStart = (currentTime: number) => {
    console.log('🎤 检测到开始说话')

    state.value.speechStartTime = currentTime
    state.value.lastSpeechTime = currentTime
    state.value.isSpeaking = true

    // Cancel any pending auto-send.
    clearSilenceTimer()

    // Start recording a fresh utterance.
    if (mediaRecorder && mediaRecorder.state === 'inactive') {
      audioChunks = []
      mediaRecorder.start()
      console.log('📹 开始录音')
    }

    // Notify the consumer.
    onSpeechStart?.()
  }

  /**
   * Handle the speech -> silence transition. Too-short utterances are
   * discarded; otherwise the silence countdown toward auto-send begins.
   */
  const handleSpeechPause = (currentTime: number) => {
    console.log('⏸️ 检测到说话暂停')

    // How long did the utterance last?
    const speechDuration = state.value.speechStartTime
      ? currentTime - state.value.speechStartTime
      : 0

    if (speechDuration < options.minSpeechDuration) {
      console.log('🚫 说话时间太短，忽略')
      // Stop and discard the in-progress recording so the rejected audio
      // cannot leak into the front of the next utterance.
      if (mediaRecorder && mediaRecorder.state === 'recording') {
        discardRecording = true
        mediaRecorder.stop()
      }
      state.value.speechStartTime = null
      return
    }

    // Start the silence countdown.
    startSilenceTimer()
  }

  /**
   * Arm the silence timer; when it fires, the utterance is finalized and sent.
   */
  const startSilenceTimer = () => {
    clearSilenceTimer()

    console.log(`⏰ 开始静音计时 ${options.silenceDuration}ms`)

    silenceTimer = window.setTimeout(() => {
      console.log('🔇 静音超时，自动发送语音')
      finalizeSpeech()
    }, options.silenceDuration)
  }

  /**
   * Cancel the pending silence timer, if any.
   */
  const clearSilenceTimer = () => {
    if (silenceTimer !== null) {
      clearTimeout(silenceTimer)
      silenceTimer = null
    }
  }

  /**
   * Finalize the current utterance: stop recording (which triggers onstop
   * and delivers the Blob) and reset the speaking state.
   */
  const finalizeSpeech = () => {
    console.log('✅ 完成语音输入')

    // Stop recording; the recorder's onstop handler emits the audio.
    if (mediaRecorder && mediaRecorder.state === 'recording') {
      mediaRecorder.stop()
      console.log('🛑 停止录音')
    }

    // Reset state.
    state.value.speechStartTime = null
    state.value.isSpeaking = false

    // Cancel the timer.
    clearSilenceTimer()
  }

  /**
   * Manually flush the current utterance instead of waiting for silence.
   */
  const forceSend = () => {
    if (state.value.isSpeaking || state.value.speechStartTime) {
      finalizeSpeech()
    }
  }

  /**
   * Tear down the detector: stop the detection loop, timers, recorder, and
   * audio context, then reset the state. An in-flight recording is still
   * delivered via onSpeechEnd (the recorder's onstop fires after stop()).
   */
  const stopVAD = () => {
    // Stop the animation-frame loop.
    if (animationFrameId !== null) {
      cancelAnimationFrame(animationFrameId)
      animationFrameId = null
    }

    // Cancel the silence timer.
    clearSilenceTimer()

    // Stop recording (onstop will deliver any captured audio).
    if (mediaRecorder && mediaRecorder.state === 'recording') {
      mediaRecorder.stop()
    }
    mediaRecorder = null

    // Close the audio context (close() returns a promise we deliberately
    // don't await).
    if (audioContext) {
      void audioContext.close()
      audioContext = null
    }
    analyser = null
    mediaStream = null

    // Reset the state IN PLACE: consumers hold a reference to the object
    // returned as `state`, so replacing `state.value` with a new object
    // would leave them watching a dead snapshot.
    Object.assign(state.value, {
      isSpeaking: false,
      volume: 0,
      speechStartTime: null,
      lastSpeechTime: null
    })

    console.log('VAD已停止')
  }

  /**
   * Register consumer callbacks. Passing undefined for a callback clears it.
   */
  const setCallbacks = (callbacks: {
    onSpeechStart?: () => void
    onSpeechEnd?: (audioData: Blob) => void
    onVolumeChange?: (volume: number) => void
  }) => {
    onSpeechStart = callbacks.onSpeechStart || null
    onSpeechEnd = callbacks.onSpeechEnd || null
    onVolumeChange = callbacks.onVolumeChange || null
  }

  /**
   * Update configuration at runtime. Note: sampleRate only takes effect on
   * the next initVAD() call — the existing AudioContext is not rebuilt.
   */
  const updateOptions = (newOptions: Partial<VADOptions>) => {
    Object.assign(options, newOptions)
    console.log('VAD配置已更新:', options)
  }

  // True while an utterance is in progress (started or actively speaking).
  const isListening = computed(() =>
    state.value.speechStartTime !== null || state.value.isSpeaking
  )

  // Elapsed ms of the current utterance. NOTE(review): this only re-evaluates
  // when speechStartTime changes, not continuously with wall-clock time.
  const speechDuration = computed(() =>
    state.value.speechStartTime
      ? Date.now() - state.value.speechStartTime
      : 0
  )

  return {
    // State (the reactive object itself; stopVAD resets it in place so this
    // reference stays live)
    state: state.value,
    isListening,
    speechDuration,

    // Methods
    initVAD,
    stopVAD,
    forceSend,
    setCallbacks,
    updateOptions,

    // Effective configuration (defaults merged with caller overrides)
    options
  }
}

