/**
 * @description Recording service (录音服务)
 */

import { getUserMediaApi } from '@/utils/client'
import audioBufferToWav from '@/utils/buffer'

// Supported PCM sample resolutions, in bits per sample.
export enum SampleBitsTypes {
  Bit8 = 8,
  Bit16 = 16,
  Bit32 = 32,
}

/**
 * Mutable accumulator for captured PCM sample data.
 *
 * NOTE(review): the interface name keeps its original (oddly cased) spelling
 * because external modules may already import it.
 */
export interface AudioDataMODEl {
  size: number // total number of samples accumulated so far
  // FIX: was typed `[]` (the empty-tuple type), which forbids pushing elements;
  // the recorder flattens Float32Array chunks into plain numbers.
  buffer: number[] // flattened PCM samples
  clear: () => void // reset size and buffer to their initial empty state
  input: (data: Float32Array) => void // append one chunk of samples
}

/**
 * Per-buffer payload delivered to the `onAudioProcessing` callback.
 * Produced by `audioBufferToWav` — see that helper for the exact semantics.
 */
export interface AudioProcessing {
  buffer?: Blob | ArrayBuffer // encoded audio chunk; presumably WAV, given the helper's name — confirm in utils/buffer
  peak: number // peak level reported by audioBufferToWav for this chunk
}

/**
 * Options accepted by the Recorder constructor and `Recorder.get()`.
 */
export interface RecorderOptions {
  inputChannels?: number // number of input channels
  outputChannels?: number // number of output channels
  bufferSize?: number // ScriptProcessor buffer size, in sample-frames
  bitRate?: number // bit rate
  sampleRate?: number // sample rate in Hz
  sampleBits?: SampleBitsTypes // bits per sample
  onComplete?: LooseFunction // called when sampling completes
  onError?: LooseFunction // error callback
  onAudioProcessing?: (args: AudioProcessing) => LooseObject // fired for every processed audio buffer
}

export default class Recorder {
  /** Microphone media stream supplied by getUserMedia. */
  private stream: MediaStream

  /** Web Audio context; created in the constructor (webkit-prefixed fallback for old Safari). */
  private audioContext = null

  /** Node bridging the MediaStream into the audio graph. */
  private audioInput: MediaStreamAudioSourceNode = null

  /** ScriptProcessor buffer size in sample-frames. */
  private bufferSize = 2048

  /** Target sample rate in Hz. */
  private sampleRate = 16000

  /** Sample resolution in bits. */
  private sampleBits = SampleBitsTypes.Bit16

  /**
   * Configuration options, as passed to the constructor.
   */
  config: RecorderOptions = {}

  /**
   * Audio capture node created via createScriptProcessor().
   */
  recorder: ScriptProcessorNode | null = null

  /**
   * Accumulator for the captured PCM data.
   */
  audioData: AudioDataMODEl | null = null

  /**
   * Browser getUserMedia implementation (undefined when unsupported).
   */
  getUserMedia = getUserMediaApi()

  static tag = '[::Recorder::]'

  /** Log an error to the console, prefixed with the Recorder tag. */
  private static throwError(...message: unknown[]) {
    console.error.call(console, this.tag, ...message)
  }

  /**
   * Acquire microphone access and hand a ready Recorder to `success`.
   *
   * @param success - Receives the constructed Recorder once the stream is granted.
   * @param error - Optional failure callback; receives a human-readable message.
   *                May be omitted, in which case the second argument is treated
   *                as the config object.
   * @param config - Recorder options forwarded to the constructor.
   */
  static get(success: (recorder: Recorder) => LooseObject, error?: LooseFunction, config?: RecorderOptions) {
    // Support the get(success, config) call shape: shift arguments when no
    // error callback was given.
    if (typeof error !== 'function') {
      config = error
      error = () => 0
    }
    const getUserMedia = getUserMediaApi()
    if (!success) return
    if (!getUserMedia) {
      Recorder.throwError('当前浏览器不支持录音功能')
      error?.('当前浏览器不支持录音功能')
      return
    }
    getUserMedia(
      { audio: true, video: false }, // audio only
      (stream) => {
        success(new Recorder(stream, config))
      },
      (err) => {
        const flag = err.name || err.code
        let message: string
        switch (flag) {
          case 'PERMISSION_DENIED':
          case 'PermissionDeniedError':
            message = '用户拒绝提供信息'
            Recorder.throwError(message)
            break
          case 'NOT_SUPPORTED_ERROR':
          case 'NotSupportedError':
            message = '您的设备不支持录音'
            Recorder.throwError(message)
            break
          case 'MANDATORY_UNSATISFIED_ERROR':
          case 'MandatoryUnsatisfiedError':
          case 'NotFoundError':
            message = '您的设备不支持录音'
            // FIX: logged message was truncated ('…硬件设' -> '…硬件设备').
            Recorder.throwError('无法发现指定的硬件设备')
            break
          default:
            message = '无法打开麦克风, 请重试'
            Recorder.throwError(`无法打开麦克风 异常信息: ${flag}`)
            break
        }
        error?.(message)
      },
    )
  }

  /**
   * @param stream - Live microphone MediaStream.
   * @param config - Optional overrides for sample rate/bits, buffer size and
   *                 channel counts.
   */
  constructor(stream: MediaStream, config: RecorderOptions = {}) {
    this.stream = stream
    this.config = config
    this.sampleBits = config.sampleBits || this.sampleBits
    this.sampleRate = config.sampleRate || this.sampleRate
    this.bufferSize = config.bufferSize || this.bufferSize
    this.audioContext = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: this.sampleRate })
    this.audioInput = this.audioContext.createMediaStreamSource(stream)
    // FIX: honor the configured channel counts (previously hard-coded to 2/2,
    // making RecorderOptions.inputChannels/outputChannels dead options).
    this.recorder = this.audioContext.createScriptProcessor(
      this.bufferSize,
      config.inputChannels || 2,
      config.outputChannels || 2,
    )
    this.audioData = {
      size: 0, // total number of captured samples
      buffer: [], // flattened PCM samples
      clear() {
        this.size = 0
        this.buffer = []
      },
      input(data) {
        this.buffer.push(...new Float32Array(data))
        this.size += data.length
      },
    }
  }

  /**
   * onaudioprocess handler: accumulate the captured PCM chunk and notify the
   * consumer via config.onAudioProcessing.
   */
  broadcas(audioProcessingEvent: AudioProcessingEvent) {
    const inputBuffer = audioProcessingEvent.inputBuffer
    // FIX: pass the first channel's Float32Array samples. Previously the
    // AudioBuffer object itself was passed, and `new Float32Array(audioBuffer)`
    // stores no real sample data (AudioBuffer is not array-like beyond .length).
    this.audioData.input(inputBuffer.getChannelData(0))
    const data: AudioProcessing = audioBufferToWav(inputBuffer)
    this.config.onAudioProcessing?.(data)
  }

  /** Start recording: wire mic -> processor -> destination and attach the handler. */
  start() {
    this.audioInput.connect(this.recorder)
    this.recorder.connect(this.audioContext.destination)
    this.recorder.onaudioprocess = this.broadcas.bind(this)
  }

  /** Stop recording: tear down the graph and release the microphone tracks. */
  stop() {
    this.audioInput.disconnect()
    this.recorder.disconnect()
    // Stopping the tracks turns off the browser's recording indicator.
    this.stream.getAudioTracks().forEach((x) => x?.stop())
    this.recorder.onaudioprocess = null
  }
}
