import { compress, encodePCM, encodeWAV } from './audioEncode';
import Player from "./playAudio"

// 构造函数参数格式
// Constructor options for Recorder.
type Config = {
  /**
   * Bit depth per sample, 8 or 16; defaults to 16.
   */
  sampleBits?: 8 | 16;
  /**
   * Output sample rate, typically 11025, 16000, 22050, 24000, 44100 or 48000;
   * defaults to the browser's native sample rate.
   */
  sampleRate?: number;
  /**
   * Number of audio channels (1 = mono, 2 = stereo).
   */
  channelsCount?: 1 | 2;
}

class Recorder {
  private audioContext: AudioContext = new (globalThis.AudioContext || (globalThis as any).webkitAudioContext)();
  /**
   * Analyser node providing real-time time-domain data for `onprogress`.
   */
  private analyser: AnalyserNode = this.audioContext.createAnalyser();
  /**
   * Total number of raw PCM samples collected so far (all channels).
   */
  private size: number = 0;
  /**
   * Whether the host platform is little-endian (detected in the constructor).
   */
  private littleEndian: boolean = false;
  /**
   * Effective recording configuration (defaults merged with user options).
   */
  private configs: Required<Config> = {
    sampleBits: 16,
    sampleRate: this.audioContext.sampleRate,
    channelsCount: 1,
  };
  /**
   * Estimated size of the recording in bytes (at the output sample rate/bits).
   */
  fileSize: number = 0;
  /**
   * Duration of the recording in seconds.
   */
  duration: number = 0;
  /**
   * The active microphone media stream.
   */
  private stream!: MediaStream;
  /**
   * Source node wrapping the microphone stream.
   */
  private mediaSource!: MediaStreamAudioSourceNode;
  /**
   * The browser's native sample rate (input rate of captured PCM).
   */
  private defaultSampleRate: number = this.audioContext.sampleRate;
  /**
   * Collected PCM chunks, left channel.
   */
  private leftBuffer: Array<Float32Array> = [];
  /**
   * Collected PCM chunks, right channel (stereo only).
   */
  private rightBuffer: Array<Float32Array> = [];
  private processorNode!: ScriptProcessorNode;
  /**
   * Recording flag. Used instead of disconnect/connect because of a Safari
   * issue with reconnecting ScriptProcessorNode.
   */
  private isRecord: boolean = false;
  /**
   * Whether playback is currently active.
   */
  private isPlaying: boolean = false;
  /**
   * Callback invoked when playback starts.
   */
  onplay!: () => void;
  /**
   * Callback invoked when playback is paused.
   */
  onpauseplay!: () => void;
  /**
   * Callback invoked when playback resumes.
   */
  onresumeplay!: () => void;
  /**
   * Callback invoked when playback is stopped.
   */
  onstopplay!: () => void;
  /**
   * Callback invoked when playback reaches the end.
   */
  onplayend!: () => void;
  /**
   * Progress callback fired on every captured audio buffer while recording.
   */
  onprogress: ((payload: {
    duration: number;
    fileSize: number;
    voper: number;
    data: Uint8Array;
  }) => void) | undefined = undefined;
  /**
   * Callback fired periodically during playback with waveform data.
   */
  onplaying!: ((payload: {
    duration: number;
    data: Uint8Array;
  }) => void);

  /**
   * Creates a recorder, validating options against the supported values and
   * falling back to defaults for anything out of range.
   */
  constructor(options?: Config) {
    if (options) {
      const { sampleBits, sampleRate, channelsCount } = options;
      Object.assign(this.configs, {
        sampleBits: sampleBits && [8, 16].includes(sampleBits) ? sampleBits : 16,
        sampleRate: sampleRate && [8000, 11025, 16000, 22050, 24000, 44100, 48000].includes(sampleRate) ? sampleRate : this.defaultSampleRate,
        channelsCount: channelsCount && [1, 2].includes(channelsCount) ? channelsCount : 1,
      })
    }
    // Detect host byte order: write 256 as little-endian, read it back
    // natively; a match means the platform is little-endian.
    this.littleEndian = (() => {
      const buffer: ArrayBuffer = new ArrayBuffer(2);
      new DataView(buffer).setInt16(0, 256, true);
      return new Int16Array(buffer)[0] === 256;
    })();
    Recorder.initGetUserMedia();
  }

  /**
   * Installs a getUserMedia shim for legacy browsers that only expose the
   * prefixed callback-style APIs.
   */
  static initGetUserMedia() {
    // BUG FIX: on the very browsers that need this shim, navigator.mediaDevices
    // may not exist at all; assigning to its property would throw. Create the
    // container first.
    if (!navigator.mediaDevices) (navigator as any).mediaDevices = {};
    if (!navigator.mediaDevices.getUserMedia)
      navigator.mediaDevices.getUserMedia = (constraints?: MediaStreamConstraints) => {
        const legacyGetUserMedia = (navigator as any).getUserMedia || (navigator as any).webkitGetUserMedia || (navigator as any).mozGetUserMedia;

        if (!legacyGetUserMedia)
          return Promise.reject(new Error('浏览器不支持 getUserMedia !'));
        // Wrap the callback-style legacy API in a Promise.
        return new Promise((resolve, reject) => legacyGetUserMedia.call(navigator, constraints, resolve, reject))
      };
  }

  /**
   * (Re)creates the audio graph: context, analyser and the script processor
   * that collects PCM chunks while `isRecord` is true.
   */
  private initRecorder(): void {
    this.clearData();
    this.audioContext = new (globalThis.AudioContext || (globalThis as any).webkitAudioContext)();
    this.analyser = this.audioContext.createAnalyser();
    // FFT size for the analyser's frequency/time-domain buffers.
    this.analyser.fftSize = 2048;
    /**
     * First argument is the capture buffer size (commonly 1024/2048/4096);
     * onaudioprocess fires once per filled buffer. Second and third arguments
     * are the input and output channel counts, kept equal.
     */
    const createScript = this.audioContext.createScriptProcessor || (this.audioContext as any).createJavaScriptNode;
    this.processorNode = createScript.apply(this.audioContext, [4096, this.configs.channelsCount, this.configs.channelsCount]);
    // PCM capture callback.
    this.processorNode.onaudioprocess = (e: AudioProcessingEvent) => {
      if (this.isRecord) {
        // Left channel data.
        const leftData: Float32Array = e.inputBuffer.getChannelData(0);
        // Copy: getChannelData returns a view that the browser reuses.
        this.leftBuffer.push(new Float32Array(leftData));
        this.size += leftData.length;

        // Stereo: also capture the right channel.
        if (this.configs.channelsCount === 2) {
          const rightData: Float32Array = e.inputBuffer.getChannelData(1);
          this.rightBuffer.push(new Float32Array(rightData));
          this.size += rightData.length;
        }

        const { sampleRate, sampleBits } = this.configs;
        // Estimated output size: samples scaled by the resampling ratio,
        // times bytes per sample.
        this.fileSize = Math.floor(this.size / Math.max(this.defaultSampleRate / sampleRate, 1)) * (sampleBits / 8);

        // Accumulate elapsed time from the actual buffer length rather than
        // assuming the requested 4096 frames.
        this.duration += leftData.length / this.defaultSampleRate;

        // Real-time waveform snapshot.
        const data = new Uint8Array(this.analyser.frequencyBinCount);
        this.analyser.getByteTimeDomainData(data);

        // Peak amplitude of this buffer. A plain loop avoids spreading
        // thousands of arguments into Math.max.apply (argument-count limits).
        let peak = -Infinity;
        for (let i = 0; i < leftData.length; i++)
          if (leftData[i] > peak) peak = leftData[i];

        // Invoke the live progress callback if registered.
        this.onprogress && this.onprogress({
          duration: this.duration,
          fileSize: this.fileSize,
          voper: peak * 100,
          data,
        })
      }
    }
  }

  /**
   * Starts recording: tears down any previous session, acquires the
   * microphone and wires up the audio graph.
   *
   * @returns a promise that rejects if already recording or if microphone
   *   access fails (the rejection carries the underlying error).
   */
  async start(): Promise<void> {
    if (this.isRecord) return Promise.reject();
    try {
      // Close the previous context: a stale instance caches a small amount of
      // audio from the prior recording.
      if (this.audioContext) await this.destroy();
      this.isRecord = true;
      this.initRecorder();

      const stream: MediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
      this.stream = stream;
      this.mediaSource = this.audioContext.createMediaStreamSource(stream);
      this.mediaSource.connect(this.analyser);
      this.analyser.connect(this.processorNode);
      // The processor must be connected to a destination to keep firing.
      this.processorNode.connect(this.audioContext.destination);
    } catch (error) {
      // BUG FIX: reset the flag so a failed start does not permanently block
      // subsequent start() calls.
      this.isRecord = false;
      console.log(error);
      return Promise.reject(error);
    }
  }

  /**
   * Pauses recording (capture callback keeps firing but discards data).
   */
  pause(): void {
    this.isRecord = false;
  }

  /**
   * Resumes a paused recording.
   */
  resume(): void {
    this.isRecord = true;
  }

  /**
   * Stops recording and disconnects the audio graph. Safe to call before
   * start(): every node is guarded.
   */
  stop(): void {
    this.mediaSource?.disconnect();
    this.processorNode?.disconnect();
    this.analyser?.disconnect();
    this.isRecord = false;
  }

  /**
   * Stops recording and returns the captured audio as PCM, resampled and
   * re-quantized to the configured rate and bit depth.
   */
  getPCM(): DataView {
    // Stop capture first.
    this.stop();
    // Merge the chunk lists into contiguous channel buffers.
    const data = this.flat();
    // Resample, then encode at the configured bit depth.
    return encodePCM(
      compress(data, this.defaultSampleRate, this.configs.sampleRate),
      this.configs.sampleBits,
      this.littleEndian,
    );
  }

  /**
   * Returns the recording as WAV: the PCM payload prefixed with a 44-byte
   * RIFF/WAVE header.
   */
  getWAV(): DataView {
    return encodeWAV({
      data: this.getPCM(),
      inputSampleRate: this.defaultSampleRate,
      outputSampleRate: this.configs.sampleRate,
      channelsCount: this.configs.channelsCount,
      sampleBits: this.configs.sampleBits,
      littleEdian: this.littleEndian,
    });
  }

  /**
   * Stops recording and plays back the captured audio.
   */
  play(): void {
    this.stop();
    this.isPlaying = true;
    // Register playback lifecycle callbacks.
    Player.addEndCallback(this.onplayend);
    Player.addPlayingCallback(this.onplaying);
    this.onplay && this.onplay();
    const wavData: DataView = this.getWAV();
    // Only play if there is payload beyond the 44-byte WAV header.
    wavData.byteLength > 44 && Player.play(wavData.buffer);
  }

  /**
   * Returns the current playback position in seconds.
   */
  getPlayTime(): number {
    return Player.getPlayTime();
  }

  /**
   * Pauses playback. No-op while recording or when nothing is playing.
   */
  pausePlay(): void {
    if (!this.isRecord && this.isPlaying) {
      this.isPlaying = false;
      this.onpauseplay && this.onpauseplay();
      Player.pausePlay();
    }
  }

  /**
   * Resumes paused playback. No-op while recording or when already playing.
   */
  resumePlay(): void {
    if (!this.isRecord && !this.isPlaying) {
      this.isPlaying = true;
      this.onresumeplay && this.onresumeplay();
      Player.resumePlay();
    }
  }

  /**
   * Stops playback. No-op while recording.
   */
  stopPlay(): void {
    if (!this.isRecord) {
      this.isPlaying = false;
      this.onstopplay && this.onstopplay();
      Player.stopPlay();
    }
  }

  /**
   * Returns the current playback waveform data (useful for visualizations).
   */
  getPlayData(): Uint8Array {
    return Player.getPlayData();
  }

  /**
   * Returns the recording as a raw-PCM Blob.
   */
  getPCMBlob(): Blob {
    return new Blob([this.getPCM()]);
  }

  /**
   * Returns the recording as a WAV Blob.
   */
  getWAVBlob(): Blob {
    return new Blob([this.getWAV()], { type: 'audio/wav' });
  }

  /**
   * De-interleaves the PCM data into separate left/right channel buffers.
   * For mono recordings, `rightData` is undefined.
   */
  getChannelData(): { leftData: DataView; rightData?: DataView } {
    const pcmData: DataView = this.getPCM();
    const len: number = pcmData.byteLength;
    const data: {
      leftData: DataView,
      rightData?: DataView,
    } = {
      leftData: null as any,
      rightData: undefined,
    };

    if (this.configs.channelsCount === 1) data.leftData = pcmData;
    else {
      // Stereo: split the interleaved stream in half.
      const left = new DataView(new ArrayBuffer(len / 2));
      const right = new DataView(new ArrayBuffer(len / 2));

      if (this.configs.sampleBits === 8)
        // 8-bit frames are 2 bytes (1 per channel).
        // BUG FIX: the loop previously stepped by 2, skipping every other
        // frame and leaving odd output bytes zeroed.
        for (let i: number = 0; i < len / 2; i++) {
          left.setInt8(i, pcmData.getInt8(i * 2));
          right.setInt8(i, pcmData.getInt8(i * 2 + 1));
        }
      else
        // 16-bit frames are 4 bytes (2 per channel); output advances 2 bytes
        // per input frame of 4.
        for (let i: number = 0; i < len / 2; i += 2) {
          left.setInt16(i, pcmData.getInt16(i * 2, this.littleEndian), this.littleEndian)
          right.setInt16(i, pcmData.getInt16(i * 2 + 2, this.littleEndian), this.littleEndian)
        }
      data.leftData = left;
      data.rightData = right;
    }

    return data
  }

  /**
   * Resets all captured data and derived counters.
   */
  private clearData(): void {
    this.leftBuffer.splice(0);
    this.rightBuffer.splice(0);
    this.size = 0;
    this.fileSize = 0;
    this.duration = 0;
  }

  /**
   * Concatenates the per-buffer chunk lists into one contiguous Float32Array
   * per channel. For mono, `rightData` is an empty array.
   */
  private flat(): { leftData: Float32Array; rightData: Float32Array; } {
    let left: Float32Array = new Float32Array(0), right: Float32Array = left;

    if (this.configs.channelsCount === 1) left = new Float32Array(this.size);
    else {
      // BUG FIX: left and right previously aliased the SAME buffer
      // (`right = left`), so the right channel overwrote the left. Each
      // channel needs its own allocation.
      left = new Float32Array(this.size / 2);
      right = new Float32Array(this.size / 2);
    }

    // Write offset into the contiguous buffer.
    let offset: number = 0;

    // Left channel.
    for (const buffer of this.leftBuffer) {
      left.set(buffer, offset);
      offset += buffer.length;
    }

    offset = 0;

    // Right channel (empty chunk list in mono, so this is a no-op).
    for (const buffer of this.rightBuffer) {
      right.set(buffer, offset);
      offset += buffer.length;
    }

    return {
      leftData: left,
      rightData: right,
    }
  }

  /**
   * Releases all resources: captured data, the media stream tracks, the
   * player, and the AudioContext.
   */
  destroy(): Promise<void> {
    this.clearData();
    // Stop all microphone tracks.
    if (this.stream?.getTracks) {
      this.stream.getTracks().forEach(track => track.stop());
      this.stream = null as any;
    }

    Player.destroyPlay();

    // Close the context unless it is already closed (closing twice throws).
    if (this.audioContext?.close && this.audioContext?.state !== 'closed') return this.audioContext.close();
    return Promise.resolve();
  }
}

export default Recorder