// Shared module state:
//   ws       — websocket used to upload microphone PCM to the ASR service
//   ws2      — websocket used to receive audio payloads for playback
//   interval — handle for the (currently unused) periodic-send timer
let ws = null;
let ws2 = null;
let interval = null;
// Playback AudioContext, created once at module load.
// The vendor-prefixed constructor covers older WebKit browsers.
var audioContext;
try {
  audioContext = new (window.AudioContext || window.webkitAudioContext)();
} catch (e) {
  // User-facing alert (Chinese): "Your current browser does not support the Web Audio API".
  alert("您当前的浏览器不支持Web Audio API ");
}
/**
 * Microphone recorder that captures PCM from a MediaStream, downsamples it
 * to `sampleRate`, and streams 16-bit little-endian PCM packets over the
 * module-level websocket `ws`.
 *
 * @param {MediaStream} stream          microphone stream (e.g. from getUserMedia)
 * @param {number}      inputSampleRate capture sample rate in Hz
 * @param {number}      sampleRate      desired output sample rate in Hz
 */
export const Recorder = function (stream, inputSampleRate, sampleRate) {
  var sampleBits = 16; // output sample size in bits (8 or 16)
  // Use the same vendor-prefixed fallback as the module-level context so
  // older WebKit browsers can record too (was a bare `new AudioContext()`).
  var context = new (window.AudioContext || window.webkitAudioContext)();
  var audioInput = context.createMediaStreamSource(stream);
  var recorder = context.createScriptProcessor(4096, 1, 1);
  var audioData = {
    size: 0, // total number of cached samples
    buffer: [], // cached Float32Array chunks from onaudioprocess
    inputSampleRate: inputSampleRate, // capture sample rate (Hz)
    inputSampleBits: 16, // capture sample size in bits (8 or 16)
    outputSampleRate: sampleRate, // output sample rate (Hz)
    outputSampleBits: sampleBits, // output sample size in bits (fixed misspelled "oututSampleBits")
    // Drop all cached audio.
    clear: function () {
      this.buffer = [];
      this.size = 0;
    },
    // Cache one chunk of raw samples (copied — the engine reuses the buffer).
    input: function (data) {
      this.buffer.push(new Float32Array(data));
      this.size += data.length;
    },
    // Merge all cached chunks and downsample to the output rate.
    compress: function () {
      var merged = new Float32Array(this.size);
      var offset = 0;
      for (var i = 0; i < this.buffer.length; i++) {
        merged.set(this.buffer[i], offset);
        offset += this.buffer[i].length;
      }
      // Downsample by keeping every Nth sample. Clamp the ratio to >= 1 so
      // an output rate above the input rate cannot produce a zero step
      // (which looped forever), and floor the length so Float32Array never
      // receives a fractional size (which throws a RangeError).
      var step = Math.max(
        1,
        Math.floor(this.inputSampleRate / this.outputSampleRate)
      );
      var length = Math.floor(merged.length / step);
      var result = new Float32Array(length);
      var index = 0;
      var j = 0;
      while (index < length) {
        result[index] = merged[j];
        j += step;
        index++;
      }
      return result;
    },
    // Encode the cached audio as raw 16-bit little-endian PCM. Any further
    // format handling is left to the server side.
    encodePCM: function () {
      var bytes = this.compress();
      var sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits);
      var dataLength = bytes.length * (sampleBits / 8);
      var buffer = new ArrayBuffer(dataLength);
      var data = new DataView(buffer);
      var offset = 0;
      for (var i = 0; i < bytes.length; i++, offset += 2) {
        // Clamp to [-1, 1] and scale to the signed 16-bit range.
        var s = Math.max(-1, Math.min(1, bytes[i]));
        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
      }
      return new Blob([data]);
    },
  };
  // Encode the cached audio, push it over the shared websocket, then clear
  // the cache so every packet carries only new data.
  var sendData = function () {
    var reader = new FileReader();
    reader.onload = (e) => {
      var arr = new Int8Array(e.target.result);
      // Guard: only send when there is data and the socket is open
      // (the original called ws.send unconditionally and could throw).
      if (arr.length > 0 && ws && ws.readyState === WebSocket.OPEN) {
        ws.send(arr);
      }
    };
    reader.readAsArrayBuffer(audioData.encodePCM());
    audioData.clear();
  };
  // Start capturing: route the microphone through the processor node.
  this.start = function () {
    audioInput.connect(recorder);
    recorder.connect(context.destination);
  };
  // Stop capturing and release the microphone and the audio graph.
  this.stop = function () {
    // Stop every track, not just the first one.
    stream.getTracks().forEach(function (track) {
      track.stop();
    });
    audioInput.disconnect();
    recorder.disconnect();
    context.close();
    console.warn("关闭音频流");
  };
  // Return the cached audio as a PCM Blob without clearing the cache.
  this.getBlob = function () {
    return audioData.encodePCM();
  };
  // Drop all cached audio.
  this.clear = function () {
    audioData.clear();
  };
  // Fires for every 4096-sample block captured from the microphone.
  recorder.onaudioprocess = function (e) {
    audioData.input(e.inputBuffer.getChannelData(0));
    sendData();
  };
};
/**
 * Open the ASR upload websocket, send the init handshake, and start the
 * recorder. When the server replies, recording is stopped and the result
 * message is shown.
 *
 * @param {Recorder} record recorder instance created by `Recorder`
 * @param {string}   url    websocket endpoint
 */
export const useWebSocket = (record, url) => {
  ws = new WebSocket(url);
  // Connection established: announce the session and start streaming audio.
  ws.onopen = function () {
    // Binary frames must be ArrayBuffers. The original set "text", which is
    // not a valid binaryType — only "blob" and "arraybuffer" are allowed.
    ws.binaryType = "arraybuffer";
    ws.send(JSON.stringify({ asrType: "real", msgType: "init" }));
    console.log("发送初始化");
    if (ws.readyState === WebSocket.OPEN) {
      record.start();
    }
  };
  // Server replied: stop recording and show the recognition result.
  ws.onmessage = function (messageEvent) {
    console.info(messageEvent);
    var jsonStr = messageEvent.data;
    if (jsonStr != null && jsonStr !== "") {
      var res = JSON.parse(jsonStr);
      // Pass the recorder along — the original called stopAudio() with no
      // argument, which made stopAudio crash on record.stop().
      stopAudio(record);
      alert(res.msg);
    }
  };
  // Transport error: just log it.
  ws.onerror = function (err) {
    console.info(err);
  };
  // Socket closed: just log it.
  ws.onclose = function (msg) {
    console.info(msg);
  };
};
/**
 * Open the playback websocket and route every incoming payload to the
 * shared receive() handler.
 *
 * @param {string} url websocket endpoint
 */
export const useWebSocket2 = (url) => {
  ws2 = new WebSocket(url);
  // Nothing to do on open — we only react to incoming messages.
  ws2.onopen = function () {};
  // Hand each payload (JSON status or audio bytes) to receive().
  ws2.onmessage = function (event) {
    const payload = event.data;
    receive(payload);
  };
  // Transport error: just log it.
  ws2.onerror = function (err) {
    console.info(err);
  };
  // Socket closed: just log it.
  ws2.onclose = function (msg) {
    console.info(msg);
  };
};
/**
 * Tear down the streaming session: close both websockets (sending the
 * end-of-stream markers first), stop the recorder, and clear the timer.
 *
 * @param {Recorder} [record] recorder to stop; tolerated if omitted
 */
export const stopAudio = (record) => {
  if (ws2) {
    ws2.close();
  }
  if (ws) {
    // Only send the end markers while the socket is still open —
    // ws.send on a closing/closed socket throws or is silently dropped.
    if (ws.readyState === WebSocket.OPEN) {
      ws.send(JSON.stringify({ msgType: "audioEnd" }));
      ws.send(JSON.stringify({ msgType: "end" }));
    }
    ws.close();
  }
  // Guard: some call sites invoke stopAudio() without a recorder, which
  // previously crashed on record.stop(). Also stop the recorder even when
  // the upload socket was never opened.
  if (record) {
    record.stop();
  }
  clearInterval(interval);
  interval = null;
};

/**
 * Handle a payload from the playback websocket. JSON strings whose
 * `message` is "OK" are acknowledgements; anything else is treated as
 * audio bytes, decoded, and played.
 *
 * @param {string|Blob|ArrayBuffer} data incoming websocket payload
 */
export const receive = (data) => {
  if (typeof data === "string") {
    // BUG FIX: the original tested `typeof e`, an undefined identifier,
    // so this acknowledgement branch was unreachable.
    let parsed = null;
    try {
      parsed = JSON.parse(data);
    } catch (err) {
      // Not JSON — fall through and treat it as an audio payload.
    }
    if (parsed && parsed.message === "OK") {
      console.log("OK");
      return;
    }
  }
  // Normalize the payload (Blob/ArrayBuffer/string) to an ArrayBuffer,
  // decode it, and play it through the shared audioContext.
  new Response(data).arrayBuffer().then(function (buf) {
    audioContext.decodeAudioData(buf, function (decoded) {
      _visualize(audioContext, decoded); // play
    });
  });
};
/**
 * Play a decoded AudioBuffer through an analyser node.
 *
 * @param {AudioContext} audioContext context to play through
 * @param {AudioBuffer}  buffer       decoded audio to play
 */
export const _visualize = (audioContext, buffer) => {
  const sourceNode = audioContext.createBufferSource();
  const analyser = audioContext.createAnalyser();
  // Wire source -> analyser -> speakers; without the destination hop
  // nothing would be audible.
  sourceNode.connect(analyser);
  analyser.connect(audioContext.destination);
  sourceNode.buffer = buffer;
  // Legacy browsers expose noteOn/noteOff instead of start/stop.
  if (!sourceNode.start) {
    sourceNode.start = sourceNode.noteOn;
    sourceNode.stop = sourceNode.noteOff;
  }
  sourceNode.start(0);
};
/**
 * Cancel the module-level send timer, if any, and reset its handle.
 */
export const clearTimer = () => {
  if (interval !== null) {
    clearInterval(interval);
  }
  interval = null;
};
