import { request } from '@/utils'
/**
 * Detects whether the page is running inside the WeChat in-app browser.
 *
 * WeChat's WebView identifies itself with "MicroMessenger" in the user
 * agent. We additionally require the JS-SDK global (`window.wx`) to be
 * present, because callers (see autoTriggerWithoutUIInteraction) invoke
 * `window.wx.ready` whenever this returns true.
 *
 * Returns false in non-browser environments (SSR, tests).
 */
function isInWechat() {
  return (
    typeof navigator !== 'undefined' &&
    /MicroMessenger/i.test(navigator.userAgent) &&
    typeof window !== 'undefined' &&
    'wx' in window
  )
}
/** Identifies which code path triggered an audio playback attempt (passed to the playback callback for logging). */
export enum AudioEmitType {
  /** User tapped anywhere on the page (document click/touchstart fallback). */
  UserClick,
  /** Played programmatically right away (non-WeChat environment). */
  ProgramDirectPlay,
  /** Playback attempted from the WeChat JS-SDK `wx.ready` callback. */
  WxReadyEvent,
  /** Playback attempted from the WeChat `getNetworkType` bridge callback. */
  WxNetworkEvent,
}

/**
 * Tries to start audio playback without an explicit UI control.
 *
 * Always installs document-wide click/touchstart listeners as a fallback
 * (any tap triggers `fn` and then detaches the listeners). On top of that:
 * - outside WeChat, `fn` is invoked immediately;
 * - inside WeChat, `fn` is invoked from `wx.ready` and again from the
 *   `getNetworkType` bridge callback (the first call primes playback, the
 *   second one actually makes it audible).
 *
 * @param fn playback callback; receives a `stopTry` function that removes
 *           the fallback listeners, the emit type, and a log label.
 */
export function autoTriggerWithoutUIInteraction(
  fn: (stopTry: () => void, emitType: AudioEmitType, s?: string) => void,
) {
  // Detach the document-wide fallback listeners.
  function detachListeners() {
    document.removeEventListener('click', onUserGesture, false)
    document.removeEventListener('touchstart', onUserGesture, false)
    console.info('移除点击空白播放音频事件')
  }

  // Fallback: any tap on the page counts as the required user gesture.
  function onUserGesture() {
    console.info('点击空白触发播放音频')
    try {
      fn(detachListeners, AudioEmitType.UserClick, '用户点击空白播放')

      detachListeners()
    } catch (err) {
      // If fn throws, keep the listeners so the user can tap again.
      console.error(err)
    }
  }

  document.addEventListener('click', onUserGesture, false)
  document.addEventListener('touchstart', onUserGesture, false)

  // Outside WeChat: play right away.
  if (!isInWechat()) {
    fn(detachListeners, AudioEmitType.ProgramDirectPlay, '直接播放')

    return
  }

  // WeChat auto-play rule: call fn on `ready` (primes playback, nothing is
  // heard yet), then call it again when `getNetworkType` fires — the resume
  // at that point is what actually starts audible playback.
  window.wx.ready(() => {
    console.info('微信 ready 触发')
    fn(detachListeners, AudioEmitType.WxReadyEvent, '微信 Ready 回调播放')

    console.info('等待 getNetworkType 回调')
    window.WeixinJSBridge.invoke('getNetworkType', {}, () => {
      console.info('getNetworkType 已回调，尝试播放第一次')
      fn(
        detachListeners,
        AudioEmitType.WxNetworkEvent,
        '微信 getNetworkType 回调播放',
      )
    })
  })
}

/**
 * Web Audio based player: fetches (or accepts) encoded audio bytes,
 * decodes them, and plays them through a GainNode for volume control.
 * Pause/resume is implemented by suspending/resuming the AudioContext.
 */
export default class AudioPlayer {
  // Cached encoded bytes of the last loaded track.
  mp3?: ArrayBuffer
  audioCtx: AudioContext
  // Volume node; sources route through it when it initialized successfully.
  gainNode?: GainNode
  // Source for the currently loaded buffer, if any.
  source?: AudioBufferSourceNode

  /** Current AudioContext state ('suspended' | 'running' | 'closed'). */
  get state() {
    return this.audioCtx.state
  }

  constructor() {
    // Fallback for older WebKit (iOS Safari) that only exposes the prefixed name.
    const AudioContext = window.AudioContext || window.webkitAudioContext
    this.audioCtx = new AudioContext({ latencyHint: 'balanced' })
    this.setupGainNode()

    this.audioCtx.onstatechange = () =>
      console.info('Ctx 状态', this.audioCtx.state)
  }

  /**
   * Creates the gain node and wires it to the destination.
   * Non-fatal on failure: sources then connect straight to the
   * destination instead (see start()).
   */
  setupGainNode(gain = 1) {
    try {
      const gainNode = this.audioCtx.createGain()
      this.gainNode = gainNode
      gainNode.gain.value = gain
      gainNode.connect(this.audioCtx.destination)
    } catch (err) {
      console.error(err)
    }
  }

  /** Sets playback volume; no-op when the gain node failed to initialize. */
  volume(v = 1) {
    if (this.gainNode) {
      this.gainNode.gain.value = v
    }
  }

  /**
   * Loads and plays an audio source, replacing any current one.
   * @param src URL to fetch, or already-downloaded encoded bytes.
   * @param loop whether the track repeats indefinitely.
   * @throws Error when no source is given.
   */
  async play(src?: string | ArrayBuffer, loop = true) {
    if (!src) {
      throw new Error('无音频文件参数')
    }

    if (this.source) {
      // Fully release the previous source: disconnect() alone leaves a
      // looping source scheduled/playing. stop() throws if the source was
      // never started, so guard it.
      try {
        this.source.stop()
      } catch (err) {
        console.error(err)
      }
      this.source.disconnect()
    }

    const bf = await this.loadAudioBuffer(src)
    console.info('音频加载完成')
    await this.start(bf, loop)
    console.info('播放音频: ', this.state, this.audioCtx.state)
    // iOS may leave the context 'suspended'/'interrupted' right after
    // start(); resuming immediately works around that.
    await this.resume()
    console.info('首次恢复音频，fix ios bug')
  }

  /** Creates a fresh buffer source for `buffer` and starts it. */
  async start(buffer: AudioBuffer, loop = true) {
    const source = this.audioCtx.createBufferSource()
    this.source = source
    source.buffer = buffer
    source.loop = loop
    // Route through the gain node when available, else straight out.
    source.connect(this.gainNode ?? this.audioCtx.destination)
    source.start()
  }

  /** Pauses playback by suspending the whole context. */
  async stop() {
    if (this.audioCtx.state === 'running') {
      await this.audioCtx.suspend()
    }
  }

  /**
   * Resumes a paused context. 'interrupted' is a non-standard state iOS
   * reports (e.g. after a phone call), hence the ts-ignore.
   */
  async resume() {
    if (
      this.state === 'suspended' ||
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore
      this.state === 'interrupted'
    ) {
      return this.audioCtx.resume()
    }
  }

  /**
   * Fetches (when given a URL), caches, and decodes the audio bytes.
   * Decodes a copy because decodeAudioData detaches the ArrayBuffer it is
   * handed, which would make later replays from the cached buffer fail.
   */
  async loadAudioBuffer(src: string | ArrayBuffer): Promise<AudioBuffer> {
    const data: ArrayBuffer =
      typeof src !== 'string'
        ? src
        : await request.get(src, {
            responseType: 'arraybuffer',
            baseURL: '',
          })
    this.mp3 = data

    return new Promise((resolve, reject) =>
      this.audioCtx.decodeAudioData(data.slice(0), resolve, reject),
    )
  }
}
