// AudioContext constructor, with the legacy WebKit-prefixed fallback for
// older Safari/Chrome builds.
let AudioContext = window.AudioContext || window.webkitAudioContext;
// The <audio> element the demos below play through.
let audio = document.getElementById('audio');
// File input used to pick a local audio file; its onchange handler is
// assigned further down in this file.
var fileChange = document.getElementById('fileChooser');


// Empty placeholder — presumably intended as a page-load hook. TODO confirm
// whether anything (e.g. <body onload>) actually calls this.
function init() {

}

// Shared Web Audio state, created lazily by _audio() and torn down by _close().
let audioContext, analyser, audioSrc

// Wire the page's <audio> element into a Web Audio graph:
//   audio element -> MediaElementAudioSourceNode -> AnalyserNode -> destination
// Populates the shared globals audioContext / analyser / audioSrc.
// BUG FIX: createMediaElementSource() may only be called once per media
// element, so a second call used to throw and leak the first context;
// repeated calls are now no-ops.
function _audio() {
    if (audioContext) {
        // Graph already built — rebuilding would throw an InvalidStateError.
        return;
    }

    audioContext = new AudioContext();
    analyser = audioContext.createAnalyser();

    audioSrc = audioContext.createMediaElementSource(audio);
    audioSrc.connect(analyser);
    analyser.connect(audioContext.destination);

    // Log context details every time its state changes (suspended/running/closed).
    audioContext.onstatechange = (e) => {
        console.log('e:', e)
        console.log('audioContext.state:', audioContext.state);
        console.log('audioContext.currentTime:', audioContext.currentTime);
        console.log('audioContext.destination:', audioContext.destination);
        console.log('audioContext.listener:', audioContext.listener);
        console.log('audioContext.sampleRate:', audioContext.sampleRate);
        console.log('audioContext.mozAudioChannelType:', audioContext.mozAudioChannelType);

        /* Read-only AudioContext properties (translated):
           currentTime - seconds on the hardware clock; starts at 0 when the
                         context is created and cannot be paused or reset.
           destination - the AudioDestinationNode, the graph's final node,
                         usually the audio rendering device.
           listener    - AudioListener used for 3D audio spatialization.
           sampleRate  - samples per second (float); shared by all nodes in
                         this context, so sample-rate conversion is unsupported.
           state       - the context's current state.
           mozAudioChannelType - Firefox OS only: the audio channel this
                         context will play on (undefined elsewhere). */
    }

}

// Shut down the shared AudioContext created by _audio(), releasing its audio
// hardware resources.
function _close() {
    // Guard: _audio() may never have run, and close() on an already-closed
    // context rejects with an InvalidStateError.
    if (!audioContext || audioContext.state === 'closed') {
        return;
    }
    audioContext.close().then(function() {
        console.log('audioContext.close()')
    }).catch(function(err) {
        // BUG FIX: the rejection used to be unhandled.
        console.error('Failed to close AudioContext:', err);
    });
}

// 创建一个时长2秒的音频片段，并用白噪声填充它
function _play() {
    var audioCtx = new(window.AudioContext || window.webkitAudioContext)();

    // 立体声
    var channels = 2;
    // 创建一个 采样率与音频环境(AudioContext)相同的 时长2秒的 音频片段。
    var frameCount = audioCtx.sampleRate * 2.0;

    var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);


    // 使用白噪声填充;
    // 就是 -1.0 到 1.0 之间的随机数
    for (var channel = 0; channel < channels; channel++) {
        // 这允许我们读取实际音频片段(AudioBuffer)中包含的数据
        var nowBuffering = myArrayBuffer.getChannelData(channel);
        for (var i = 0; i < frameCount; i++) {
            // Math.random() is in [0; 1.0]
            // audio needs to be in [-1.0; 1.0]
            nowBuffering[i] = Math.random() * 2 - 1;
        }


        // 获取一个 音频片段源节点(AudioBufferSourceNode)。
        // 当我们想播放音频片段时，我们会用到这个源节点。
        var source = audioCtx.createBufferSource();
        // 把刚才生成的片段加入到 音频片段源节点(AudioBufferSourceNode)。
        source.buffer = myArrayBuffer;
        // 把 音频片段源节点(AudioBufferSourceNode) 连接到
        // 音频环境(AudioContext) 的终节点，这样我们就能听到声音了。
        source.connect(audioCtx.destination);
        // 开始播放声源
        source.start();
    }
}

// 
// Most recent file selection: `file` holds the raw FileList, `buffer` a Blob
// wrapping it. _play_file() and ac() use `buffer` as their default input.
let buffer
var file

// BUG FIX: the original read `fileChange.onchange = fileChange = (e) => ...`,
// which also overwrote the `fileChange` element reference with the handler
// function itself. Only the onchange assignment is intended.
fileChange.onchange = (e) => {
    if (e.target.files[0]) {
        let playfile = URL.createObjectURL(e.target.files[0]);
        audio.src = playfile;
        audio.load();
        // _audio()
        file = e.target.files
        // A Blob suits large files; it is decoded later via FileReader.
        buffer = new Blob(e.target.files, { type: 'audio/mp3' });
        console.log('buffer', buffer)
    }
};



// Route the page's <audio> element through a GainNode so its volume can be
// adjusted from script. (The gain value itself is left at its default here.)
function _play_mp3() {
    const AudioCtor = window.AudioContext || window.webkitAudioContext;
    const ctx = new AudioCtor();

    // Feed the HTMLMediaElement into a MediaElementAudioSourceNode.
    const mediaElement = document.querySelector('audio');
    const mediaSource = ctx.createMediaElementSource(mediaElement);

    // mediaSource -> gain -> destination
    const volume = ctx.createGain();
    mediaSource.connect(volume);
    volume.connect(ctx.destination);
}

// Load an MP3 over XHR, decode it with decodeAudioData, and wire the
// .play/.stop buttons to start/stop looping playback.
function _play_XML() {
    var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
    var source;
    var play = document.querySelector('.play');
    var stop = document.querySelector('.stop');

    // Fetch the track as an ArrayBuffer, decode it, and put the decoded
    // AudioBuffer into a fresh AudioBufferSourceNode.
    function getData() {
        source = audioCtx.createBufferSource();
        var request = new XMLHttpRequest();

        request.open('GET', 'http://127.0.0.1:8080/AudioContext+canvas/mp3/%E6%9D%A8%E5%AD%90%E5%A7%97%20-%20%E5%BE%AE%E7%94%9C%E7%9A%84%E5%9B%9E%E5%BF%86.mp3', true);

        request.responseType = 'arraybuffer';

        request.onload = function() {
            var audioData = request.response;
            console.log('audioData:', audioData)
            audioCtx.decodeAudioData(audioData, function(buffer) {
                source.buffer = buffer;

                source.connect(audioCtx.destination);
                source.loop = true;
            }, function(e) {
                // BUG FIX: the original error callback built a message string
                // and discarded it — actually report the failure.
                console.error('Error with decoding audio data', e && e.err);
            });
        }

        // BUG FIX: network failures were previously silent.
        request.onerror = function() {
            console.error('Failed to load audio track');
        };

        request.send();
    }

    // Wire up buttons to play and stop audio.

    play.onclick = function() {
        getData();
        // NOTE(review): start(0) runs before decoding finishes; the node is
        // expected to play once its buffer is assigned — confirm cross-browser.
        source.start(0);
        play.setAttribute('disabled', 'disabled');
    }

    stop.onclick = function() {
        // BUG FIX: clicking stop before play used to throw on an undefined source.
        if (source) {
            source.stop(0);
        }
    }

}

// Decode the most recently chosen file (the file-level `buffer` Blob by
// default) with FileReader + decodeAudioData, then play it via the
// .play/.stop buttons.
function _play_file() {
    var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
    var source;
    var play = document.querySelector('.play');
    var stop = document.querySelector('.stop');

    // Read `blob` into an ArrayBuffer, decode it, and start playback.
    function getData(blob = buffer) {
        source = audioCtx.createBufferSource();
        var reader = new FileReader();
        reader.onload = function(evt) {
            console.log(evt.target.result);
            audioCtx.decodeAudioData(evt.target.result).then(function(decodedData) {
                source.buffer = decodedData;

                source.connect(audioCtx.destination);
                source.start(0);
            }).catch(function(err) {
                // BUG FIX: the promise chain had no rejection handler, so
                // decode failures surfaced as unhandled rejections.
                console.error('Error with decoding audio data', err);
            });
        };
        // BUG FIX: read failures were previously silent.
        reader.onerror = function() {
            console.error('Failed to read blob:', reader.error);
        };
        reader.readAsArrayBuffer(blob)
    }

    // Wire up buttons to play and stop audio.

    play.onclick = function() {
        getData();
        play.setAttribute('disabled', 'disabled');
    }

    stop.onclick = function() {
        // BUG FIX: clicking stop before play used to throw on an undefined source.
        if (source) {
            source.stop(0);
        }
    }

}

// Load an MP3 via XHR, then run it through a ScriptProcessorNode that copies
// the input to the output while mixing in low-level white noise.
// NOTE(review): ScriptProcessorNode is deprecated in favor of AudioWorklet;
// kept here because this file targets the legacy API throughout.
function ScriptMP3() {
    // Create AudioContext and buffer source.
    // BUG FIX: `source`, `request` and `myBuffer` were implicit globals;
    // they are now properly scoped.
    var audioCtx = new AudioContext();
    var source = audioCtx.createBufferSource();

    // ScriptProcessorNode with a bufferSize of 4096 and one input/output channel.
    var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
    console.log(scriptNode.bufferSize);

    // Load an audio track via XHR and decodeAudioData.
    function getData() {
        var request = new XMLHttpRequest();
        request.open('GET', 'http://127.0.0.1:8080/AudioContext+canvas/mp3/%E6%9D%A8%E5%AD%90%E5%A7%97%20-%20%E5%BE%AE%E7%94%9C%E7%9A%84%E5%9B%9E%E5%BF%86.mp3', true);
        request.responseType = 'arraybuffer';
        request.onload = function() {
            var audioData = request.response;

            audioCtx.decodeAudioData(audioData, function(buffer) {
                source.buffer = buffer;
            }, function(e) {
                // BUG FIX: the original error callback discarded its message.
                console.error('Error with decoding audio data', e && e.err);
            });
        }
        request.send();
    }

    // Process each audio block: pass the input through with added noise.
    scriptNode.onaudioprocess = function(audioProcessingEvent) {
        // The input buffer is the song loaded above.
        var inputBuffer = audioProcessingEvent.inputBuffer;

        // The output buffer contains the samples that will be modified and played.
        var outputBuffer = audioProcessingEvent.outputBuffer;

        // Loop through the output channels (only one here).
        for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
            var inputData = inputBuffer.getChannelData(channel);
            var outputData = outputBuffer.getChannelData(channel);

            // Loop through the 4096 samples.
            for (var sample = 0; sample < inputBuffer.length; sample++) {
                // Make output equal to the input...
                outputData[sample] = inputData[sample];

                // ...then add noise to each output sample.
                outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
            }
        }
    }

    getData();

    // source -> scriptNode -> destination; playback starts immediately
    // (silent until the buffer finishes decoding).
    source.connect(scriptNode);
    scriptNode.connect(audioCtx.destination);
    source.start();

    // When the buffer source stops playing, disconnect everything.
    source.onended = function() {
        source.disconnect(scriptNode);
        scriptNode.disconnect(audioCtx.destination);
    }
}

// 左右声道互换
function ac(blob = buffer) {


    // source = audioCtx.createBufferSource();
    var reader = new FileReader();
    reader.onload = function(evt) {
        var audioCtx = new AudioContext();
        audioCtx.decodeAudioData(evt.target.result, function(data) {
         var source = audioCtx.createBufferSource();
         source.buffer = data;
         var splitter = audioCtx.createChannelSplitter(2);
         source.connect(splitter);
         var merger = audioCtx.createChannelMerger(2);

         // Reduce the volume of the left channel only
         var gainNode = audioCtx.createGain();
         gainNode.gain.value = 0.5;
         splitter.connect(gainNode, 0);

         // Connect the splitter back to the second input of the merger: we
         // effectively swap the channels, here, reversing the stereo image.
         gainNode.connect(merger, 0, 1);
         splitter.connect(merger, 1, 0);

         var dest = audioCtx.createMediaStreamDestination();

         // Because we have used a ChannelMergerNode, we now have a stereo
         // MediaStream we can use to pipe the Web Audio graph to WebRTC,
         // MediaRecorder, etc.
         merger.connect(dest);
         merger.connect(audioCtx.destination);
         source.start();
        });
    };
    reader.readAsArrayBuffer(blob)


}

// Demonstrate AudioContext construction options (latencyHint / sampleRate)
// by playing the page's <audio> element through a low-sample-rate context.
function _options() {
    /* Sample-rate reference (translated):
       8000 Hz   telephone; sufficient for speech
       11025 Hz  "telephone quality" — enough to recognize a voice
       22050 Hz  radio-broadcast quality
       32000 Hz  miniDV camcorders, DAT (LP mode)
       44100 Hz  audio CD; also common for MPEG-1 audio (VCD, SVCD, MP3)
       47250 Hz  Nippon Columbia (Denon)'s first commercial PCM recorder
       48000 Hz  miniDV, digital TV, DVD, DAT, film and professional audio
       50000 Hz  late-1970s 3M / Soundstream commercial digital recorders
       50400 Hz  Mitsubishi X-80 digital recorder
       96000/192000 Hz  DVD-Audio, some LPCM DVD tracks, Blu-ray and HD-DVD audio
       2.8224 MHz  SACD (Sony/Philips Direct Stream Digital, 1-bit sigma-delta)
       In short, sound cards commonly use 11, 22, 44.1 or 48 kHz. Higher rates
       give better quality and larger files; a CD-quality song is ~45 MB. */
    let audioCtx = new AudioContext({
        latencyHint: "playback", // one of: balanced | interactive | playback
        sampleRate: 11025, // common tiers: 11025 / 22050 / 24000 / 44100 / 48000 Hz
    });

    var myAudio = document.querySelector('audio');
    var source = audioCtx.createMediaElementSource(myAudio);

    source.connect(audioCtx.destination);
    // BUG FIX: play() returns a promise that rejects when autoplay is
    // blocked; the original left that rejection unhandled.
    myAudio.play().catch(function(err) {
        console.error('Playback failed:', err);
    });
}
// 调节音量
function rangeValue() {

    var range = document.querySelector('#range');

    // getUserMedia获取流
    // 把流放入MediaStreamAudioSourceNode
    // 输出到video元素

    if (navigator.mediaDevices) {
        console.log('getUserMedia supported.');
        navigator.mediaDevices.getUserMedia ({audio: true})
        .then(function(stream) {


            // 创建MediaStreamAudioSourceNode
            // Feed the HTMLMediaElement into it
            var audioCtx = new AudioContext();
            var source = audioCtx.createMediaStreamSource(stream);

            // 创建二阶滤波器
            var biquadFilter = audioCtx.createBiquadFilter();
            biquadFilter.type = "lowshelf";
            biquadFilter.frequency.value = 1000;
            biquadFilter.gain.value = range.value;

            // 把AudioBufferSourceNode连接到gainNode
            // gainNode连接到目的地, 所以我们可以播放
            // 音乐并用鼠标调节音量
            source.connect(biquadFilter);
            biquadFilter.connect(audioCtx.destination);

            // Get new mouse pointer coordinates when mouse is moved
            // then set new gain value

            range.oninput = function() {
                biquadFilter.gain.value = range.value;
            }
        })
        .catch(function(err) {
            console.log('The following gUM error occured: ' + err);
        });
    } else {
       console.log('getUserMedia not supported on your browser!');
    }
}