// Prefer the standard constructor, falling back to the WebKit-prefixed one
// for older Safari. Neither binding is ever reassigned, so use const.
const AudioContextCtor = window.AudioContext || window.webkitAudioContext;
// Shared AudioContext for this module; null when the Web Audio API is
// unavailable (kept falsy, so existing `if (audioCtx)` guards still work).
const audioCtx = AudioContextCtor ? new AudioContextCtor() : null;

/**
 * Web Audio helpers: download + decode audio files, and wire up playable
 * buffer sources. Relies on the module-level `audioCtx` defined above.
 */
const soundBuffer = {
  /**
   * Download an audio file and decode it into an AudioBuffer.
   * @param {string} link - URL of the audio resource to fetch.
   * @returns {Promise<AudioBuffer>} resolves with the decoded buffer; rejects
   *   with an Error when the Web Audio API is unsupported, the request fails
   *   or times out, the server answers with a non-2xx status, or decoding fails.
   */
  getBuffer(link) {
    return new Promise((resolve, reject) => {
      if (!audioCtx) {
        reject(new Error('AudioContext is not supported in this environment'));
        return;
      }
      const xhr = new XMLHttpRequest();
      xhr.open('GET', link);
      xhr.responseType = 'arraybuffer';
      xhr.onload = function () {
        // status 0 covers file:// and other schemes where it is not populated.
        if (xhr.status !== 0 && (xhr.status < 200 || xhr.status >= 300)) {
          reject(new Error(`Failed to load audio (HTTP ${xhr.status}): ${link}`));
          return;
        }
        audioCtx.decodeAudioData(
          xhr.response,
          (buffer) => resolve(buffer),
          (e) => reject(e instanceof Error ? e : new Error(`decodeAudioData failed: ${e}`))
        );
      };
      // Without these handlers the promise would never settle on a
      // network error or timeout — callers would hang forever.
      xhr.onerror = () => reject(new Error(`Network error while loading ${link}`));
      xhr.ontimeout = () => reject(new Error(`Timed out while loading ${link}`));
      xhr.send();
    });
  },

  /**
   * Build a playable source node for a decoded buffer, routed through
   * analyser -> gain -> destination on the shared context.
   * @param {AudioBuffer} buffer - decoded audio data (e.g. from getBuffer).
   * @returns {AudioBufferSourceNode} the source node; the caller starts playback.
   */
  createSound(buffer) {
    // Autoplay policies create contexts in the "suspended" state; resume
    // before playing. (resume() is async; playback begins once it completes.)
    if (audioCtx.state !== 'running') {
      audioCtx.resume();
    }
    const analyser = audioCtx.createAnalyser();
    const gainNode = audioCtx.createGain();
    const source = audioCtx.createBufferSource();
    source.buffer = buffer;
    source.connect(analyser);
    analyser.connect(gainNode);
    gainNode.connect(audioCtx.destination);
    return source;
  },
};
export default soundBuffer;
