import { VoiceRecordingState } from '@/types'

/**
 * Microphone recording service with optional voice-activity detection (VAD).
 *
 * Captures audio via MediaRecorder, streams chunk data to an `onAudioData`
 * callback (buffered up to `bufferThreshold` once speech is detected), and
 * auto-stops after `silenceTimeout` ms of silence when VAD is enabled.
 *
 * Requires browser APIs (navigator.mediaDevices, MediaRecorder, AudioContext);
 * not usable in a non-DOM environment.
 */
export class VoiceRecordingService {
  private mediaRecorder: MediaRecorder | null = null
  private audioStream: MediaStream | null = null
  private audioChunks: Blob[] = []
  private state: VoiceRecordingState = VoiceRecordingState.IDLE
  private onStateChange?: (state: VoiceRecordingState) => void
  private onAudioData?: (data: ArrayBuffer) => void
  private vadEnabled = true // voice endpoint (activity) detection
  private silenceThreshold = -40 // silence threshold (dB) — tuned to be more sensitive
  private silenceTimeout = 2000 // silence timeout (ms)
  private silenceTimer: number | null = null
  private audioContext: AudioContext | null = null
  private analyser: AnalyserNode | null = null
  // Kept so the VAD source node can be disconnected when recording stops;
  // previously a new MediaStreamAudioSourceNode was created per recording and
  // never released, accumulating nodes in the audio graph.
  private sourceNode: MediaStreamAudioSourceNode | null = null
  private dataArray: Uint8Array | null = null

  // MIME type actually selected for the current recording. Used when building
  // result blobs — previously 'audio/wav' was hardcoded even when
  // MediaRecorder produced webm/opus, mislabeling the data.
  private recordingMimeType = 'audio/wav'

  // Audio buffering state
  private audioBuffer: ArrayBuffer[] = []
  private bufferThreshold = 1024 * 1024 // 1MB buffer threshold
  private isRecording = false
  private hasDetectedSpeech = false
  private speechStartTime = 0

  constructor() {
    this.setupAudioContext()
  }

  // Create the AudioContext used for voice-activity detection.
  private async setupAudioContext() {
    try {
      this.audioContext = new (window.AudioContext || (window as any).webkitAudioContext)()
    } catch (error) {
      console.error('创建音频上下文失败:', error)
    }
  }

  // Register a callback invoked whenever the recording state changes.
  onStateChanged(callback: (state: VoiceRecordingState) => void) {
    this.onStateChange = callback
  }

  // Register a callback that receives buffered/final audio data.
  onAudioDataReceived(callback: (data: ArrayBuffer) => void) {
    this.onAudioData = callback
  }

  /**
   * Start recording from the default microphone.
   *
   * Requests a 16 kHz mono stream, wires up VAD (if enabled), and starts
   * MediaRecorder with a 200 ms timeslice so data arrives incrementally.
   *
   * @throws rethrows getUserMedia / MediaRecorder errors (e.g. permission denied).
   */
  async startRecording(): Promise<void> {
    try {
      // Acquire the microphone stream.
      this.audioStream = await navigator.mediaDevices.getUserMedia({
        audio: {
          echoCancellation: true,
          noiseSuppression: true,
          sampleRate: 16000,
          channelCount: 1
        }
      })

      // Set up voice-activity detection on the live stream.
      if (this.vadEnabled && this.audioContext) {
        this.setupVoiceActivityDetection()
      }

      // Create the MediaRecorder with the most compatible supported format.
      const mimeType = this.getSupportedMimeType()
      this.recordingMimeType = mimeType
      this.mediaRecorder = new MediaRecorder(this.audioStream, {
        mimeType: mimeType
      })

      this.audioChunks = []
      this.audioBuffer = []
      this.isRecording = true
      this.hasDetectedSpeech = false

      this.mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          this.audioChunks.push(event.data)

          // Buffer the chunk's raw bytes; flush once the threshold is reached.
          event.data.arrayBuffer().then(buffer => {
            this.audioBuffer.push(buffer)
            this.checkBufferThreshold()
          })
        }
      }

      this.mediaRecorder.onstop = () => {
        this.handleRecordingStop()
      }

      // Start recording, emitting a data chunk every 200 ms.
      this.mediaRecorder.start(200)
      this.setState(VoiceRecordingState.RECORDING)

      console.log('开始录音，使用格式:', mimeType)
    } catch (error) {
      console.error('开始录音失败:', error)
      throw error
    }
  }

  // Pick the first MIME type MediaRecorder supports, preferring WAV.
  private getSupportedMimeType(): string {
    const types = [
      'audio/wav',
      'audio/webm;codecs=opus',
      'audio/webm',
      'audio/mp4'
    ]

    for (const type of types) {
      if (MediaRecorder.isTypeSupported(type)) {
        console.log('选择的音频格式:', type)
        return type
      }
    }

    return 'audio/wav' // fallback default
  }

  // Flush the buffer once its total size crosses the threshold.
  // Buffered data is only sent after speech has been detected.
  private checkBufferThreshold() {
    if (!this.hasDetectedSpeech) return

    const totalSize = this.audioBuffer.reduce((sum, buffer) => sum + buffer.byteLength, 0)

    if (totalSize >= this.bufferThreshold) {
      this.sendBufferedAudio()
    }
  }

  // Concatenate all buffered chunks into one ArrayBuffer and emit it.
  private sendBufferedAudio() {
    if (this.audioBuffer.length === 0) return

    const totalSize = this.audioBuffer.reduce((sum, buffer) => sum + buffer.byteLength, 0)
    const combinedBuffer = new ArrayBuffer(totalSize)
    const combinedView = new Uint8Array(combinedBuffer)

    let offset = 0
    for (const buffer of this.audioBuffer) {
      combinedView.set(new Uint8Array(buffer), offset)
      offset += buffer.byteLength
    }

    if (this.onAudioData) {
      this.onAudioData(combinedBuffer)
    }

    // Reset the buffer after emitting.
    this.audioBuffer = []

    console.log('发送缓冲音频数据，大小:', totalSize)
  }

  /**
   * Stop recording. Cancels any pending silence timer and flushes remaining
   * buffered audio. The MediaRecorder's onstop handler then emits the final
   * complete recording as well.
   *
   * NOTE(review): the consumer receives both the flushed partial buffers here
   * and the full recording from handleRecordingStop — confirm the receiving
   * side expects this duplication.
   */
  stopRecording(): void {
    if (this.mediaRecorder && this.mediaRecorder.state === 'recording') {
      this.mediaRecorder.stop()
    }

    if (this.silenceTimer) {
      clearTimeout(this.silenceTimer)
      this.silenceTimer = null
    }

    this.isRecording = false

    // Flush any remaining buffered data.
    if (this.audioBuffer.length > 0) {
      this.sendBufferedAudio()
    }
  }

  // Attach an analyser to the microphone stream for level monitoring.
  private setupVoiceActivityDetection() {
    if (!this.audioContext || !this.audioStream) return

    this.sourceNode = this.audioContext.createMediaStreamSource(this.audioStream)
    this.analyser = this.audioContext.createAnalyser()
    this.analyser.fftSize = 256
    this.dataArray = new Uint8Array(this.analyser.frequencyBinCount)

    this.sourceNode.connect(this.analyser)

    this.monitorAudioLevel()
  }

  // Poll the analyser each animation frame: detect speech onset and arm/clear
  // the silence timer that auto-stops the recording.
  private monitorAudioLevel() {
    if (!this.analyser || !this.dataArray) return

    const checkAudioLevel = () => {
      // The loop ends itself when recording stops.
      if (!this.isRecording) return

      this.analyser!.getByteFrequencyData(this.dataArray!)

      // Average byte magnitude mapped to a rough dB value (0 → -Infinity,
      // which safely compares below any threshold).
      const average = this.dataArray!.reduce((sum, value) => sum + value, 0) / this.dataArray!.length
      const decibels = 20 * Math.log10(average / 255)

      // Speech onset detection.
      if (!this.hasDetectedSpeech && decibels > this.silenceThreshold) {
        this.hasDetectedSpeech = true
        this.speechStartTime = Date.now()
        console.log('检测到语音开始，音量:', decibels.toFixed(2), 'dB')
      }

      if (this.hasDetectedSpeech) {
        if (decibels < this.silenceThreshold) {
          // Below threshold: arm the silence timer once.
          if (!this.silenceTimer) {
            this.silenceTimer = window.setTimeout(() => {
              console.log('检测到静音，自动停止录音')
              this.stopRecording()
            }, this.silenceTimeout)
          }
        } else {
          // Sound resumed: disarm the silence timer.
          if (this.silenceTimer) {
            clearTimeout(this.silenceTimer)
            this.silenceTimer = null
          }
        }
      }

      requestAnimationFrame(checkAudioLevel)
    }

    checkAudioLevel()
  }

  // MediaRecorder onstop handler: release resources and emit the final blob.
  private handleRecordingStop() {
    // Stop all microphone tracks.
    if (this.audioStream) {
      this.audioStream.getTracks().forEach(track => track.stop())
      this.audioStream = null
    }

    // Tear down the VAD graph so nodes don't accumulate across recordings.
    if (this.sourceNode) {
      this.sourceNode.disconnect()
      this.sourceNode = null
    }
    if (this.analyser) {
      this.analyser.disconnect()
      this.analyser = null
    }

    this.isRecording = false
    this.setState(VoiceRecordingState.PROCESSING)
    console.log('录音已停止，音频片段数量:', this.audioChunks.length)

    // Emit the complete recording, labeled with the MIME type MediaRecorder
    // actually produced (fixes the blob being mislabeled as 'audio/wav').
    if (this.audioChunks.length > 0) {
      const finalBlob = new Blob(this.audioChunks, { type: this.recordingMimeType })
      finalBlob.arrayBuffer().then(buffer => {
        if (this.onAudioData) {
          this.onAudioData(buffer)
        }
      })
    }
  }

  // Return the full recording as a Blob (null if nothing was recorded).
  getRecordingBlob(): Blob | null {
    if (this.audioChunks.length === 0) return null
    return new Blob(this.audioChunks, { type: this.recordingMimeType })
  }

  // Transition to a new state and notify the listener (if any).
  private setState(newState: VoiceRecordingState) {
    if (this.state !== newState) {
      this.state = newState
      this.onStateChange?.(newState)
    }
  }

  // Current recording state.
  get currentState(): VoiceRecordingState {
    return this.state
  }

  // Enable/disable voice endpoint detection.
  setVADEnabled(enabled: boolean) {
    this.vadEnabled = enabled
  }

  // Set the silence threshold (dB).
  setSilenceThreshold(threshold: number) {
    this.silenceThreshold = threshold
  }

  // Set the silence timeout (ms).
  setSilenceTimeout(timeout: number) {
    this.silenceTimeout = timeout
  }

  // Release all resources (stream, timers, audio context).
  dispose() {
    this.stopRecording()
    if (this.audioContext) {
      this.audioContext.close()
      this.audioContext = null
    }
  }
}

// Audio playback service
/**
 * Plays raw audio data (e.g. server TTS responses) through the Web Audio API.
 *
 * Requires browser APIs (AudioContext); not usable in a non-DOM environment.
 */
export class AudioPlaybackService {
  private audioContext: AudioContext | null = null
  private currentSource: AudioBufferSourceNode | null = null

  constructor() {
    this.setupAudioContext()
  }

  // Create the AudioContext (with legacy webkit fallback).
  private async setupAudioContext() {
    try {
      this.audioContext = new (window.AudioContext || (window as any).webkitAudioContext)()
    } catch (error) {
      console.error('创建音频上下文失败:', error)
    }
  }

  /**
   * Decode and play the given audio data.
   *
   * Resolves when playback ends (including when stopped via stopPlayback).
   *
   * @param audioData encoded audio bytes decodable by decodeAudioData
   * @throws if the context is missing or decoding/playback fails
   */
  async playAudio(audioData: ArrayBuffer): Promise<void> {
    if (!this.audioContext) {
      throw new Error('音频上下文未初始化')
    }

    try {
      // Browsers often create the context 'suspended' under autoplay policy;
      // resume it first, otherwise playback silently never starts.
      if (this.audioContext.state === 'suspended') {
        await this.audioContext.resume()
      }

      const audioBuffer = await this.audioContext.decodeAudioData(audioData)

      const source = this.audioContext.createBufferSource()
      source.buffer = audioBuffer
      source.connect(this.audioContext.destination)
      this.currentSource = source

      source.start()

      return new Promise((resolve) => {
        // Bind onended to the local node, not this.currentSource: a second
        // playAudio() call would replace the field and the original code
        // then attached the handler to the wrong node, leaving this promise
        // pending forever.
        source.onended = () => {
          if (this.currentSource === source) {
            this.currentSource = null
          }
          resolve()
        }
      })
    } catch (error) {
      console.error('播放音频失败:', error)
      throw error
    }
  }

  // Stop the current playback, if any. The node's onended still fires, which
  // resolves the pending playAudio promise.
  stopPlayback() {
    if (this.currentSource) {
      this.currentSource.stop()
      this.currentSource = null
    }
  }

  // Release the audio context and any playing source.
  dispose() {
    this.stopPlayback()
    if (this.audioContext) {
      this.audioContext.close()
      this.audioContext = null
    }
  }
}
