import {getDataByUrl} from "@/utils/js/common.ts";
/** Tuning options for MuteDetect. */
export interface Config {
  detectIntensity: number; // Detection granularity, 0-100: samples per analysis window; larger = finer, more accurate detection
  muteThreshold: number;   // Silence threshold, 0-100: volume at or below which a window counts as silent
  startDelay: number;      // Leading span to skip before analysis, e.g. 1000
  endDelay: number;        // Trailing span to skip at the end, e.g. 1000
  muteDuration: number;    // Minimum silence length required to report a range
}
/**
 * Singleton detector that finds silent (mute) ranges in an audio file.
 *
 * Usage: MuteDetect.Init(config).getMuteRange(url) resolves to a list of
 * [start, end] pairs describing the silent spans of the audio.
 */
class MuteDetect {
  // Lazily-created singleton instance (see Init).
  private static muteDetect: MuteDetect | null = null;
  private config: Config;
  // Callback for streaming detection; to be wired up by feed() (not yet implemented).
  private muteDetectCb?: Function;
  // Singleton pattern: instances can only be created through Init().
  private constructor (config: Config) {
    this.config = Object.assign ({}, config);
  }
  /**
   * Returns the shared MuteDetect instance, creating it on first use.
   * Subsequent calls merge the new settings into the existing config.
   */
  public static Init (config: Config): MuteDetect {
    if (MuteDetect.muteDetect) {
      // Bug fix: merge into the instance's config object. The original
      // merged onto the instance itself, which left this.config stale so
      // re-initialisation never actually changed the detection settings.
      Object.assign (MuteDetect.muteDetect.config, config);
    } else {
      MuteDetect.muteDetect = new MuteDetect (config);
    }
    return MuteDetect.muteDetect;
  }
  // TODO: split an AudioBuffer by the start/end delays (not implemented).
  public static splitAudioBuffer (audioBuffer: AudioBuffer, startDelay: number, endDelay: number) {
  }
  /**
   * Decodes raw audio bytes into an AudioBuffer at the given sample rate.
   * The temporary AudioContext is always closed, even if decoding fails.
   */
  private static decode (buffer: ArrayBuffer, sampleRate: number): Promise<AudioBuffer> {
    const ctx = new AudioContext ({sampleRate});
    return ctx.decodeAudioData (buffer).finally (() => {
      ctx.close ();
    });
  }
  // TODO: push real-time stream data for incremental detection (not implemented).
  public feed (streamData: ArrayBuffer, muteDetectCb: Function) {
  }
  /**
   * Downloads an audio file and resolves with its silent ranges as
   * [start, end] pairs in seconds (two decimal places).
   * Rejects if the download or decode fails.
   * (Rewritten as a plain async method; the original wrapped awaits in a
   * redundant `new Promise(async …)` executor.)
   */
  public async getMuteRange (audioSrc: string): Promise<number[][]> {
    // Download the file, decode the bytes to an AudioBuffer, then analyse it.
    const buffer = await getDataByUrl (audioSrc, 'arraybuffer');
    const audioBuffer = await MuteDetect.decode (buffer as ArrayBuffer, 16000);
    return this.calcMuteRange (audioBuffer);
  }
  // TODO: release resources / reset the singleton (not implemented).
  public destroy () {
  }
  /**
   * Scans the (trimmed) channel data in fixed-size windows, averaging each
   * window's absolute amplitude, and collects every silent stretch lasting
   * at least `muteDuration` ms. Returns [start, end] pairs in seconds.
   */
  private calcMuteRange (audioBuffer: AudioBuffer): Array<Array<number>> {
    const {duration, length} = audioBuffer;
    const res: Array<Array<number>> = [];
    const {detectIntensity} = this.config;
    // Window length in samples; clamp to at least 5 so a tiny or zero
    // detectIntensity cannot degenerate into per-sample windows.
    const windowSize = Math.max (detectIntensity, 5);
    // Channel data with the configured head/tail delays trimmed off.
    const data = this.splitDataByConfig (audioBuffer);
    // Samples per millisecond of the source buffer.
    const samplesPerMs = length / (duration * 1000);
    const stack: Array<number> = [];
    let sum = 0;
    let windowStart = 0;
    for (let i = 0; i < data.length; i++) {
      if (i !== 0 && i % windowSize === 0) {
        // A window just completed: compare its mean amplitude to the threshold.
        const avg = sum / (i - windowStart);
        if (this.judgeMute (avg)) {
          // Silent window: remember where the silence started (only once).
          if (!stack.length) {
            stack.push (i);
          }
        } else if (stack.length) {
          // Sound resumed: keep the open silence range only if long enough.
          const muteStart = stack[0];
          if (this.judgeMuteTimeEnough (muteStart, i, samplesPerMs)) {
            stack.push (i);
            res.push ([...stack]);
          }
          // Bug fix: always discard the open range once sound resumes.
          // The original kept a too-short range's start index, so a later
          // reported range could span loud audio between two short silences.
          stack.length = 0;
        }
        sum = 0;
        windowStart = i;
      }
      sum += Math.abs (data[i] || 0);
    }
    // NOTE(review): silence running to the very end of the data is never
    // reported (a range only closes when sound resumes) — confirm whether
    // trailing silence should be flushed here.
    return this.indicesToSeconds (res, samplesPerMs);
  }
  /**
   * Returns channel-0 data with the configured startDelay/endDelay
   * trimmed from the head and tail. Delays are treated as milliseconds,
   * matching muteDuration's use against samples-per-ms elsewhere.
   */
  private splitDataByConfig (audioBuffer: AudioBuffer): Float32Array {
    const {startDelay, endDelay} = this.config;
    const durationMs = audioBuffer.duration * 1000;
    let data = audioBuffer.getChannelData (0);
    // Bug fix: samples per 1ms. The original computed
    // `data.length / duration * 1000` — (samples/sec) x 1000, a precedence
    // slip — and compared the ms delays against the duration in SECONDS,
    // so trimming effectively never ran for typical delays like 1000ms.
    const samplesPerMs = data.length / durationMs;
    if (startDelay < durationMs) {
      data = data.slice (Math.floor (samplesPerMs * startDelay));
    }
    if (endDelay < durationMs) {
      // Bug fix: the original subtracted an extra 1 sample here; also guard
      // against a negative end index after the head has been trimmed.
      data = data.slice (0, Math.max (0, data.length - Math.floor (samplesPerMs * endDelay)));
    }
    return data;
  }
  /**
   * True when the averaged window amplitude counts as silence.
   * NOTE(review): RATE = 1e10 scales a 0-100 threshold down to <= 1e-8,
   * while channel samples lie in [-1, 1], so almost nothing qualifies as
   * silent — preserved as-is; confirm the intended scaling factor.
   */
  private judgeMute (value: number): boolean {
    const {muteThreshold} = this.config;
    const RATE = 10000000000;
    return muteThreshold / RATE > value;
  }
  /**
   * True when the span between two window indices covers at least
   * `muteDuration` milliseconds (samplesPerMs converts ms -> samples).
   */
  private judgeMuteTimeEnough (beforeIndex: number, newIndex: number, samplesPerMs: number): boolean {
    const {muteDuration} = this.config;
    const indexSub = newIndex - beforeIndex;
    return muteDuration * samplesPerMs <= indexSub;
  }
  /**
   * Converts sample-index pairs into [start, end] in SECONDS, rounded to
   * two decimals. (The original comment claimed ms, but index/samplesPerMs
   * yields ms and the extra /1000 makes the result seconds.)
   */
  private indicesToSeconds (res: Array<Array<number>>, samplesPerMs: number): Array<Array<number>> {
    return res.map (([before, after]) => [
      Number ((before / samplesPerMs / 1000).toFixed (2)),
      Number ((after / samplesPerMs / 1000).toFixed (2)),
    ]);
  }
}
export default MuteDetect;