/**
 * Audio-stream-to-PCM converter
 * Converts MediaStreamTrack audio into 16 kHz 16-bit PCM.
 *
 * Use case: image-frame mode (aiChat_img.vue)
 * - Continuously sends small PCM chunks (roughly every 25 ms)
 * - Used to generate digital-human image frames in real time
 * - PCM conversion is performed by audio-processor.js (an AudioWorklet)
 * - Full-sentence (speech-segment) detection is NOT used here,
 *   unlike the video-stream mode
 */
export class AudioToPCMConverter {
  /**
   * Converts a MediaStreamTrack's audio into 16 kHz 16-bit little-endian PCM
   * and delivers it in small batches through a callback.
   *
   * @param {number} [sampleRate=16000] - Target sample rate for the AudioContext.
   */
  constructor(sampleRate = 16000) {
    this.sampleRate = sampleRate
    this.audioContext = null
    this.sourceNode = null
    this.processorNode = null
    this.onPCMData = null
    this.isProcessing = false

    // 🚀 Accumulation buffer (batching strategy to reduce WebSocket call frequency).
    // Chunks are normalized to Uint8Array before being stored so flushBuffer()
    // can merge them uniformly regardless of which code path produced them.
    this.audioBuffer = []
    this.bufferSize = 0
    this.BATCH_SIZE = 800 // flush after ~800 bytes (~25 ms at 16 kHz mono 16-bit) for very low latency

    // ✅ Silence filtering is already done inside the AudioWorklet (threshold 0.05);
    // no second-pass filtering here, to avoid false positives.
  }

  /**
   * Start converting an audio track to PCM.
   *
   * @param {MediaStreamTrack} audioTrack - Audio track to process.
   * @param {Function} onPCMCallback - Called as (arrayBuffer, isSpeechSegment)
   *   with each merged PCM batch.
   * @throws Re-throws any setup error after releasing partially-created resources.
   */
  async startProcessing(audioTrack, onPCMCallback) {
    if (this.isProcessing) {
      console.warn('⚠️ Audio processing already started')
      return
    }

    try {
      this.onPCMData = onPCMCallback

      console.log('🎤 Starting audio processing...')

      // Create the audio context at the target sample rate.
      this.audioContext = new (window.AudioContext || window.webkitAudioContext)({
        sampleRate: this.sampleRate
      })

      // Wrap the track in a MediaStream so it can feed a source node.
      const stream = new MediaStream([audioTrack])
      this.sourceNode = this.audioContext.createMediaStreamSource(stream)

      // Prefer AudioWorklet; fall back to the deprecated ScriptProcessorNode.
      if (this.audioContext.audioWorklet) {
        await this.initAudioWorklet()
      } else {
        this.initScriptProcessor()
      }

      // Wire up the graph: source -> processor -> destination.
      this.sourceNode.connect(this.processorNode)
      this.processorNode.connect(this.audioContext.destination)

      this.isProcessing = true
      console.log('✅ Audio processing started')

    } catch (error) {
      console.error('❌ Failed to start audio processing:', error)
      // FIX: release any partially-created resources (AudioContext, nodes) so a
      // failed start does not leak them; stop() is safe on half-initialized state.
      this.stop()
      throw error
    }
  }

  /**
   * Initialize the AudioWorklet-based processor (preferred path).
   * Loads /static/js/audio-processor.js and forwards its PCM output in batches.
   */
  async initAudioWorklet() {
    try {
      // Load the AudioWorklet module (timestamp query defeats caching).
      const timestamp = new Date().getTime()
      await this.audioContext.audioWorklet.addModule(`/static/js/audio-processor.js?v=${timestamp}`)
      console.log(`✅ AudioWorklet 加载完成 (版本: ${timestamp})`)

      this.processorNode = new AudioWorkletNode(this.audioContext, 'pcm-processor')

      // Receive PCM data (plus debug info) from the worklet.
      this.processorNode.port.onmessage = (event) => {
        const { pcmData, isSpeechSegment } = event.data

        // 🖼️ Image-frame mode: keep streaming small PCM chunks; do not wait for
        // a complete speech segment. This handles the AI's voice reply
        // (remoteAudioTrack), which must be converted to frames in real time.
        if (pcmData && pcmData.byteLength > 0) {
          // Normalize to Uint8Array so the buffer holds one chunk type only.
          const chunk = new Uint8Array(pcmData)
          this.audioBuffer.push(chunk)
          this.bufferSize += chunk.length

          // Flush once the batch size is reached, or when the worklet signals
          // the end of a speech segment.
          if (this.bufferSize >= this.BATCH_SIZE || isSpeechSegment) {
            this.flushBuffer(isSpeechSegment)
          }
        }
      }

      console.log('✅ AudioWorklet initialized')
    } catch (error) {
      console.error('❌ Failed to initialize AudioWorklet:', error)
      throw error
    }
  }

  /**
   * Initialize a ScriptProcessorNode-based processor (deprecated fallback
   * for browsers without AudioWorklet support).
   */
  initScriptProcessor() {
    console.warn('⚠️ Using deprecated ScriptProcessorNode (fallback mode)')

    const bufferSize = 4096
    this.processorNode = this.audioContext.createScriptProcessor(bufferSize, 1, 1)

    this.processorNode.onaudioprocess = (event) => {
      const inputData = event.inputBuffer.getChannelData(0)

      // Convert the float samples to 16-bit PCM and hand off immediately.
      const pcmData = this.floatTo16BitPCM(inputData)
      this.onPCMData?.(pcmData)
    }
  }

  /**
   * Convert a Float32Array of samples in [-1, 1] to 16-bit little-endian PCM.
   * Out-of-range samples are clamped.
   *
   * @param {Float32Array} float32Array - Floating-point audio samples.
   * @returns {ArrayBuffer} PCM data (2 bytes per sample, little-endian).
   */
  floatTo16BitPCM(float32Array) {
    const buffer = new ArrayBuffer(float32Array.length * 2)
    const view = new DataView(buffer)

    for (let i = 0; i < float32Array.length; i++) {
      // Clamp to [-1, 1].
      const s = Math.max(-1, Math.min(1, float32Array[i]))
      // Scale asymmetrically: negatives map to [-0x8000, 0), positives to [0, 0x7FFF].
      const val = s < 0 ? s * 0x8000 : s * 0x7FFF
      view.setInt16(i * 2, val, true) // true = little-endian
    }

    return buffer
  }

  /**
   * Merge all buffered chunks into one ArrayBuffer, deliver it via onPCMData,
   * and reset the buffer. No-op when the buffer is empty.
   *
   * @param {boolean} [isSpeechSegment=false] - Whether this flush closes a
   *   complete speech segment (forwarded to the callback).
   */
  flushBuffer(isSpeechSegment = false) {
    if (this.audioBuffer.length === 0 || this.bufferSize === 0) {
      return
    }

    // Merge all buffered chunks. Tolerate both Uint8Array and raw ArrayBuffer
    // chunks for robustness, although push sites normalize to Uint8Array.
    const mergedBuffer = new Uint8Array(this.bufferSize)
    let offset = 0

    for (const chunk of this.audioBuffer) {
      const bytes = chunk instanceof Uint8Array ? chunk : new Uint8Array(chunk)
      mergedBuffer.set(bytes, offset)
      offset += bytes.length
    }

    // Deliver the merged batch along with the segment flag.
    this.onPCMData?.(mergedBuffer.buffer, isSpeechSegment)

    // Reset the buffer.
    this.audioBuffer = []
    this.bufferSize = 0
  }

  /**
   * Accumulate PCM data and flush in batches of BATCH_SIZE bytes.
   * (Silence was already filtered by the AudioWorklet.)
   *
   * @param {ArrayBuffer} pcmData - PCM chunk to accumulate.
   */
  accumulateAndSend(pcmData) {
    // Normalize to Uint8Array before buffering.
    const chunk = new Uint8Array(pcmData)
    this.audioBuffer.push(chunk)
    this.bufferSize += chunk.length

    // FIX: reuse flushBuffer() instead of duplicating the merge/send/reset logic.
    if (this.bufferSize >= this.BATCH_SIZE) {
      this.flushBuffer()
    }
  }

  /**
   * Stop processing: flush any remaining buffered audio, disconnect the graph,
   * close the AudioContext, and clear all references. Safe to call on a
   * partially-initialized or already-stopped instance.
   */
  stop() {
    console.log('🛑 Stopping audio processing...')

    // 🚀 Deliver any remaining buffered audio before tearing down.
    if (this.bufferSize > 0) {
      console.log(`📤 Flushing ${this.bufferSize} bytes of remaining audio data`)
      this.flushBuffer()
    }

    if (this.processorNode) {
      try {
        this.processorNode.disconnect()
      } catch (e) {
        console.warn('Failed to disconnect processor node:', e)
      }
      this.processorNode = null
    }

    if (this.sourceNode) {
      try {
        this.sourceNode.disconnect()
      } catch (e) {
        console.warn('Failed to disconnect source node:', e)
      }
      this.sourceNode = null
    }

    if (this.audioContext) {
      try {
        // FIX: AudioContext.close() returns a Promise; attach a rejection
        // handler so a failed close cannot surface as an unhandled rejection
        // (the synchronous try/catch alone would not catch it).
        const closing = this.audioContext.close()
        closing?.catch?.((e) => console.warn('Failed to close audio context:', e))
      } catch (e) {
        console.warn('Failed to close audio context:', e)
      }
      this.audioContext = null
    }

    this.isProcessing = false
    this.onPCMData = null

    console.log('✅ Audio processing stopped')
  }

  /**
   * Report the current processing state.
   * @returns {{isProcessing: boolean, sampleRate: number, hasAudioContext: boolean, contextState: (string|undefined)}}
   */
  getStatus() {
    return {
      isProcessing: this.isProcessing,
      sampleRate: this.sampleRate,
      hasAudioContext: !!this.audioContext,
      contextState: this.audioContext?.state
    }
  }
}

