import Vosk from 'vosk-browser'
/**
 * 语音识别器类 - 使用Vosk第三方库
 * 实现本地语音识别功能，支持实时识别、结果合并等
 */
/**
 * Local speech recognizer built on the third-party vosk-browser library.
 * Captures audio from the page's first <video> element, streams it through
 * a Vosk model, and collects recognized text into `recognitionResults`.
 *
 * Optional consumer callbacks (assign after construction):
 *   onProgress(partialText)  - fired for interim (partial) results
 *   onResult(finalText)      - fired for each finalized segment
 *   onError(error)           - fired when initialization fails
 *   onComplete()             - reserved for consumers; not fired internally
 */
class SpeechRecognizer {
  constructor() {
    this.model = null
    this.recognizer = null
    this.isRecognizing = false
    this.recognitionResults = []
    this.currentSegment = ''
    // Web Audio resources; created in startRecognition, released in stopRecognition.
    this.audioContext = null
    this.audioSource = null
    this.audioProcessor = null
    // Consumer callbacks (see class doc).
    this.onProgress = null
    this.onResult = null
    this.onError = null
    this.onComplete = null
  }

  /**
   * Initialize the recognizer by loading the Vosk model.
   * @param {Object} [options] - Configuration options.
   * @param {string} [options.modelPath] - URL/path of the Vosk model archive.
   *   Defaults to the Chinese model bundled with the extension.
   * @returns {Promise<boolean>} true on success, false on failure
   *   (failure is also reported through `onError` when set).
   */
  async initialize(options = {}) {
    try {
      // Resolve the bundled model's extension URL (served from public assets).
      const extensionUrl = chrome.runtime.getURL('/assets/models/vosk-model-cn-0.22.zip')
      this.modelPath = options.modelPath || extensionUrl
      console.log('模型路径:', this.modelPath)

      // Load the Vosk model (downloads/unpacks the archive on first use).
      this.model = await Vosk.createModel(this.modelPath)
      console.log('模型创建成功')
      return true
    } catch (error) {
      console.error('语音识别器初始化失败:', error)
      this.onError?.(error)
      return false
    }
  }

  /**
   * Start real-time recognition on the page's first <video> element.
   * Audio is routed video -> ScriptProcessor -> destination so playback is
   * unaffected while each buffer is also fed to the Vosk recognizer.
   * NOTE: createScriptProcessor is deprecated; migrating to AudioWorklet
   * is a tracked follow-up.
   * @returns {Promise<boolean>} true if recognition started, false otherwise.
   */
  async startRecognition() {
    if (this.isRecognizing) {
      console.warn('语音识别已在进行中')
      return false
    }

    if (!this.model) {
      console.error('语音识别模型未加载')
      return false
    }

    try {
      this.isRecognizing = true
      this.recognitionResults = []
      this.currentSegment = ''

      // Guard: without a <video> element there is nothing to capture.
      const element = document.getElementsByTagName('video')[0]
      if (!element) {
        throw new Error('no <video> element found on the page')
      }

      this.audioContext = new (window.AudioContext || window.webkitAudioContext)()
      this.audioSource = this.audioContext.createMediaElementSource(element)

      // Recognizer must match the context's sample rate.
      this.recognizer = new this.model.KaldiRecognizer(this.audioContext.sampleRate)
      // Arrow functions keep `this` bound to the recognizer instance.
      this.recognizer.on('partialresult', (message) => {
        if (message.result.partial.length) {
          this.currentSegment = message.result.partial
          this.onProgress?.(message.result.partial)
        }
      })
      this.recognizer.on('result', (message) => {
        console.log(message.result.text)
        const text = message.result.text
        if (text) {
          // Accumulate finalized segments so callers can read them later.
          this.recognitionResults.push(text)
          this.onResult?.(text)
        }
      })

      // Request word-level timing info in results.
      this.recognizer.setWords(true)

      // 4096-sample buffers, mono in / mono out.
      this.audioProcessor = this.audioContext.createScriptProcessor(4096, 1, 1)

      // Wire the graph: source -> processor -> speakers.
      this.audioSource.connect(this.audioProcessor)
      this.audioProcessor.connect(this.audioContext.destination)

      // Feed each buffer to Vosk, then pass the audio through unchanged.
      this.audioProcessor.onaudioprocess = (event) => {
        this.recognizer.acceptWaveform(event.inputBuffer)
        const inputData = event.inputBuffer.getChannelData(0)
        const outputData = event.outputBuffer.getChannelData(0)
        for (let i = 0; i < inputData.length; i++) {
          outputData[i] = inputData[i]
        }
      }

      return true
    } catch (error) {
      console.error('启动语音识别失败:', error)
      // Failure must not leave the recognizer stuck in the "recognizing"
      // state or leak partially-created audio resources.
      this.stopRecognition()
      return false
    }
  }

  /**
   * Stop recognition and release Web Audio resources.
   * Safe to call at any time, including when recognition is not running.
   */
  stopRecognition() {
    if (this.audioProcessor) {
      this.audioProcessor.onaudioprocess = null
      this.audioProcessor.disconnect()
      this.audioProcessor = null
    }
    if (this.audioSource) {
      this.audioSource.disconnect()
      this.audioSource = null
    }
    if (this.audioContext) {
      // close() returns a Promise; best-effort fire-and-forget.
      this.audioContext.close().catch(() => {})
      this.audioContext = null
    }
    if (this.recognizer) {
      // vosk-browser recognizers hold WASM-side state; remove() frees it —
      // NOTE(review): confirm against the vosk-browser version in use.
      this.recognizer.remove?.()
      this.recognizer = null
    }
    this.isRecognizing = false
  }

  /**
   * Release all resources held by this recognizer, including the model.
   * The instance must be re-initialized before it can be used again.
   */
  destroy() {
    this.stopRecognition()

    if (this.model) {
      // vosk-browser models expose terminate() to shut down the worker —
      // NOTE(review): confirm against the vosk-browser version in use.
      this.model.terminate?.()
      this.model = null
    }

    this.recognitionResults = []
    this.currentSegment = null
    this.onProgress = null
    this.onResult = null
    this.onError = null
    this.onComplete = null

    console.log('语音识别器已销毁')
  }
}

export default SpeechRecognizer
