// Shared Web Audio objects; lazily created/connected by initAudio()/play().
let audioContext: AudioContext;
let analyser: AnalyserNode;
let audioSource: AudioBufferSourceNode;
let processorNode: ScriptProcessorNode;
/**
 * Playback offset in seconds, accumulated across pause/resume cycles.
 */
let playTime: number = 0;
/**
 * Absolute AudioContext timestamp taken when playback last started or resumed.
 */
let playStamp: number = 0;
/**
 * Raw encoded bytes of the audio currently being played.
 */
let audioData: ArrayBuffer;
/**
 * Whether playback is currently paused.
 */
let isPaused: boolean = false;
/**
 * Total duration in seconds, computed once playback ends naturally.
 */
let totalTime: number = 0;
/**
 * Callback invoked when playback reaches the end of the buffer.
 */
let endCallback = (): void => { };
/**
 * Callback invoked for each processed audio chunk during playback.
 */
let playingCallback: (payload: { duration: number; data: Uint8Array }) => void;


/**
 * Lazily build the shared AudioContext and AnalyserNode, and reset the
 * playback bookkeeping to its initial (stopped, position zero) state.
 */
const initAudio = (): void => {
  // Prefer the standard constructor; fall back to the prefixed Safari one.
  const ContextCtor = globalThis.AudioContext || (globalThis as any).webkitAudioContext;
  audioContext = new ContextCtor();

  analyser = audioContext.createAnalyser();
  analyser.fftSize = 2048;

  playTime = 0;
  playStamp = 0;
  isPaused = false;
};

/**
 * Decode `audioData` and start (or resume) playback from `playTime`.
 *
 * Fix: uses the promise form of decodeAudioData — the two-argument callback
 * form is the legacy syntax; the promise form resolves with the same buffer.
 * Also reports decode failures via console.error instead of console.log.
 *
 * @returns Promise resolving with the decoded AudioBuffer once playback starts.
 */
const play = (): Promise<AudioBuffer> => {
  isPaused = false;

  // Decode a copy: decodeAudioData detaches the ArrayBuffer it is given,
  // and we need audioData intact for later resume calls.
  return audioContext.decodeAudioData(audioData.slice(0)).then(buffer => {
    audioSource = audioContext.createBufferSource();

    // `ended` fires both on natural completion and on stop(); only treat it
    // as "playback finished" when we are not pausing deliberately.
    audioSource.onended = () => {
      if (!isPaused) {
        // audioContext.currentTime is a monotonically increasing read-only
        // hardware clock; combine it with the accumulated pause offset.
        totalTime = audioContext.currentTime - playStamp + playTime;
        endCallback();
        audioSource?.disconnect();
        processorNode?.disconnect();
        analyser?.disconnect();
      }
    };

    audioSource.buffer = buffer;
    // Route through the analyser so the played data can be inspected
    // (recording and playback cannot share the microphone path).
    audioSource.connect(analyser);
    analyser.connect(audioContext.destination);
    // Start at the accumulated offset so pause/resume continues correctly.
    audioSource.start(0, playTime);

    // Remember when this segment started, for pause-time bookkeeping.
    playStamp = audioContext.currentTime;

    return buffer;
  }).catch((error: unknown) => {
    console.error(error);
    throw error;
  });
};

/**
 * Stop and release the current buffer source.
 *
 * A source created from decoded audio cannot be started again after stop(),
 * so pausing implies destroying it; the next play/resume builds a fresh one.
 */
const destroySource = (): void => {
  if (!audioSource) {
    return;
  }
  audioSource.stop();
  audioSource = null as any;
};

const playAudio = {
  /**
   * Decode `data` and start playback from the beginning.
   *
   * @param data - Encoded audio bytes (anything decodeAudioData accepts).
   * @param channelsCount - Input/output channel count for the script
   *   processor node; defaults to 1 (mono).
   * @returns Promise resolving with the decoded AudioBuffer once playback
   *   has been started.
   */
  play(data: ArrayBuffer, channelsCount?: 1 | 2): Promise<AudioBuffer> {
    if (!audioContext) initAudio();
    this.stopPlay();
    audioData = data;
    totalTime = 0;

    if (playingCallback) {
      // Default to mono.
      channelsCount = channelsCount ?? 1;
      /**
       * createScriptProcessor(bufferSize, inChannels, outChannels):
       * - bufferSize is the number of sample frames collected before each
       *   onaudioprocess tick (typically 1024/2048/4096); 4096 is used here.
       * - input and output channel counts are kept identical.
       * Falls back to the legacy createJavaScriptNode on old browsers.
       */
      const createScript = audioContext.createScriptProcessor || (audioContext as any).createJavaScriptNode;
      processorNode = createScript.apply(audioContext, [4096, channelsCount, channelsCount]);
      analyser.connect(processorNode);
      // The processor must be routed to the destination for it to keep firing.
      processorNode.connect(audioContext.destination);
      let duration = 0;
      // Per-chunk callback: elapsed time plus a fresh time-domain frame.
      processorNode.onaudioprocess = () => {
        if (!isPaused) {
          // Each tick represents 4096 sample frames of playback.
          duration += 4096 / audioContext.sampleRate;

          // Snapshot the current waveform (real-time playback data).
          const frame = new Uint8Array(analyser.frequencyBinCount);
          analyser.getByteTimeDomainData(frame);

          playingCallback({
            duration,
            data: frame,
          });
        }
      };
    }

    return play();
  },

  /**
   * Pause playback, accumulating elapsed time so resumePlay() continues
   * from the same offset.
   */
  pausePlay(): void {
    if (!isPaused) {
      // Fix: flip the paused flag BEFORE stopping the source, so the
      // onended handler triggered by stop() is guaranteed to observe the
      // paused state and does not fire endCallback.
      isPaused = true;
      // Multiple pauses accumulate.
      playTime += audioContext.currentTime - playStamp;
      destroySource();
    }
  },

  /**
   * Resume playback from the offset recorded by pausePlay().
   */
  resumePlay(): Promise<AudioBuffer> {
    return play();
  },

  /**
   * Stop playback and discard the current audio data and offset.
   */
  stopPlay(): void {
    playTime = 0;
    // Fix: clear the paused flag so getPlayTime() does not keep reporting
    // a stale paused offset after an explicit stop.
    isPaused = false;
    // Fix: detach any processor node from a previous session; without this,
    // every replay leaked a node whose onaudioprocess kept invoking
    // playingCallback multiple times per chunk.
    processorNode?.disconnect();
    audioData = null as any;
    destroySource();
  },

  /**
   * Tear down the player and release every resource it holds.
   */
  destroyPlay(): void {
    audioSource?.disconnect();
    processorNode?.disconnect();
    analyser?.disconnect();
    destroySource();
    // Fix: actually release the underlying audio hardware; without close()
    // the AudioContext (and its OS audio stream) leaks. Fire-and-forget.
    void audioContext?.close();
    playTime = null as any;
    playStamp = null as any;
    audioContext = null as any;
    analyser = null as any;
    audioData = null as any;
    isPaused = null as any;
    totalTime = null as any;
    processorNode = null as any;
    endCallback = null as any;
    // Fix: also drop the playing callback so it cannot leak its closure.
    playingCallback = null as any;
  },

  /**
   * Snapshot the current time-domain waveform from the analyser.
   */
  getPlayData(): Uint8Array {
    const data = new Uint8Array(analyser.frequencyBinCount);
    // Copy the analyser's current samples into the array.
    analyser.getByteTimeDomainData(data);

    return data;
  },

  /**
   * Register a callback fired when playback reaches the natural end of the
   * buffer (not fired on pause or stop).
   */
  addEndCallback(fn: () => void = (): void => { }) {
    endCallback = fn;
  },

  /**
   * Register a callback fired for every processed chunk while playing.
   * Must be registered before play() for the processor node to be created.
   */
  addPlayingCallback(fn: (payload: { duration: number; data: Uint8Array }) => void) {
    playingCallback = fn;
  },

  /**
   * Current playback position in seconds (or the total duration once
   * playback has ended naturally).
   */
  getPlayTime(): number {
    const pTime: number = isPaused ? playTime : audioContext.currentTime - playStamp + playTime;

    return totalTime || pTime;
  }
};

// Singleton playback controller shared by all importers of this module.
export default playAudio;