import { ElMessage } from 'element-plus'
import 'element-plus/theme-chalk/el-message.css'
/** Options accepted by the KeyWordsAudioStreamer constructor. */
interface AudioStreamerConfig {
  /** WebSocket endpoint of the keyword backend. Defaults to `''`. */
  wsUrl?: string
  /** Desired audio sample rate in Hz. Defaults to `16000`. */
  targetSampleRate?: number
}
/**
 * Keyword payload from the backend.
 * NOTE(review): not referenced anywhere in this file — presumably the parsed
 * shape of incoming WebSocket messages; confirm against the consumers.
 */
export interface KeywordResponse {
  // Currently matched keyword (assumed — verify with backend docs).
  current: string
  // Full list of keywords being tracked (assumed — verify with backend docs).
  keywords: string[]
}
/** Callback invoked with each raw WebSocket message (`MessageEvent.data`). */
type MessageCallback = (data: any) => void

/**
 * Streams microphone audio over a WebSocket to a keyword-spotting backend.
 *
 * Pipeline: getUserMedia → AudioContext → AudioWorklet ('resample-audio-processor')
 * → WebSocket. A 30 s heartbeat keeps the connection alive and reconnects
 * when it drops. Call start() to begin, stop() to release all resources,
 * and onMessage() to receive backend messages.
 */
export class KeyWordsAudioStreamer {
  private config: Required<AudioStreamerConfig>
  private websocket?: WebSocket
  private mediaStream?: MediaStream
  private audioContext?: AudioContext
  private processorNode?: AudioWorkletNode
  private messageCallback?: MessageCallback
  private isConnected: boolean = false
  // Heartbeat interval handle, kept so stop() can cancel it and repeated
  // start() calls never stack multiple intervals (previously it leaked).
  private heartbeatTimer?: ReturnType<typeof setInterval>

  constructor(config: AudioStreamerConfig) {
    // Merge caller-supplied options over the defaults.
    const defaultConfig: Required<AudioStreamerConfig> = {
      wsUrl: '',
      targetSampleRate: 16000,
    }

    this.config = {
      ...defaultConfig,
      ...config,
    }
  }

  /**
   * Start streaming: connect the WebSocket, capture the microphone, build
   * the audio pipeline and install the 30 s heartbeat/reconnect timer.
   * @throws whatever WebSocket / getUserMedia / AudioWorklet setup throws;
   *         resources acquired so far are released via stop() first.
   */
  async start(): Promise<void> {
    try {
      // Initialize the WebSocket connection.
      this.websocket = new WebSocket(this.config.wsUrl)
      await this.setupWebSocket()

      // Acquire the microphone stream.
      this.mediaStream = await navigator.mediaDevices.getUserMedia({
        audio: {
          // Browsers silently ignore unsupported constraints, so the
          // granted sample rate is not guaranteed to match the request.
          channelCount: 1,
          sampleRate: this.config.targetSampleRate,
          noiseSuppression: true,
          echoCancellation: true,
          autoGainControl: true,
        },
      })

      // Set up the audio processing pipeline.
      await this.setupAudioProcessing()

      // Heartbeat: keep the connection alive, or reconnect when it closed.
      // Clear any previous timer so restarts never accumulate intervals.
      if (this.heartbeatTimer !== undefined) {
        clearInterval(this.heartbeatTimer)
      }
      this.heartbeatTimer = setInterval(() => {
        if (
          this.websocket &&
          this.isConnected &&
          this.websocket.readyState === WebSocket.OPEN
        ) {
          console.log('发送关键词心跳包')
          // An empty audio frame acts as the keep-alive payload.
          const emptyAudioData = new Float32Array(0)
          this.websocket.send(emptyAudioData)
        } else {
          console.log('检测到关键词后端连接关闭，重新开启连接', Date())
          // Tear down, then reconnect; log instead of leaking an
          // unhandled rejection if the reconnect fails.
          this.stop()
            .then(() => this.start())
            .catch((err) => console.error('💀: 关键词重连失败', err))
        }
      }, 30000)
    } catch (error) {
      await this.stop()
      throw error
    }
  }

  /**
   * Stop streaming and release every acquired resource: heartbeat timer,
   * microphone tracks, AudioContext and the WebSocket. Safe to call even
   * if start() never ran or failed part-way.
   */
  async stop(): Promise<void> {
    if (this.heartbeatTimer !== undefined) {
      clearInterval(this.heartbeatTimer)
      this.heartbeatTimer = undefined
    }
    this.mediaStream?.getTracks().forEach((t) => t.stop())
    if (this.audioContext && this.audioContext.state !== 'closed') {
      await this.audioContext.close()
    }
    this.websocket?.close()
  }

  /** Register the callback invoked with every WebSocket message's data. */
  onMessage(callback: MessageCallback): void {
    this.messageCallback = callback
  }

  /** Wire up WebSocket handlers; resolves on open, rejects on error. */
  private async setupWebSocket(): Promise<void> {
    return new Promise((resolve, reject) => {
      if (!this.websocket) return

      this.websocket.binaryType = 'arraybuffer'

      this.websocket.onopen = () => {
        this.isConnected = true
        console.log('🤖: 关键词后端连接成功', Date())
        resolve()
      }
      this.websocket.onmessage = (event) => {
        if (this.messageCallback) {
          this.messageCallback(event.data)
        } else {
          console.error('💀: 竟然没有注册事件回调')
        }
      }
      // FIX: this used to be `this.websocket.close = ...`, which overwrote
      // the close() METHOD instead of registering the close-event handler —
      // stop() then invoked the handler and never actually closed the socket.
      this.websocket.onclose = () => {
        this.isConnected = false
        console.log('🤖: 关键词后端连接关闭', Date())
      }
      this.websocket.onerror = (err) => {
        this.isConnected = false
        ElMessage.error('启动关键词失败，请检查关键词服务是否正常')
        reject(err)
      }
    })
  }

  /** Build the microphone → AudioWorklet → WebSocket pipeline. */
  private async setupAudioProcessing(): Promise<void> {
    if (!this.mediaStream) return

    const audioTracks = this.mediaStream.getAudioTracks()

    if (audioTracks.length === 0) {
      throw new Error('😡: 这里怎么一个音频轨道都没有!')
    }

    // Sample rate the browser actually granted; may be undefined when the
    // browser does not expose it in the track settings.
    const lastTrack = audioTracks[audioTracks.length - 1]
    const mediaStreamSampleRate = lastTrack?.getSettings().sampleRate

    if (mediaStreamSampleRate) {
      // The browser honours sample-rate settings, so pin the context to the
      // configured target rate (previously hard-coded to 16000, which made
      // the targetSampleRate option dead).
      this.audioContext = new AudioContext({
        sampleRate: this.config.targetSampleRate,
      })
    } else {
      console.warn('🤖: 看起来这个浏览器不支持指定采样率')
      this.audioContext = new AudioContext()
    }

    await this.audioContext.audioWorklet.addModule(
      // Served from the public/ folder.
      '/resample-audio-processor.js'
    )

    this.processorNode = new AudioWorkletNode(
      this.audioContext,
      'resample-audio-processor'
    )

    // Forward every resampled frame from the worklet to the backend.
    this.processorNode.port.onmessage = (event) => {
      const audioData = event.data.audio
      if (
        this.websocket &&
        this.isConnected &&
        this.websocket.readyState === WebSocket.OPEN
      ) {
        // NOTE: this is the only place audio is sent out.
        this.websocket.send(audioData)
      } else {
        // Connection is gone — release capture resources (fire-and-forget).
        void this.stop()
      }
    }

    // Microphone stream feeds the processor node…
    const sourceNode = this.audioContext.createMediaStreamSource(
      this.mediaStream
    )
    sourceNode.connect(this.processorNode)
    // …whose output is routed to the destination; the worklet discards its
    // outputs, so nothing is actually audible.
    this.processorNode.connect(this.audioContext.destination)
  }
}
