// Shared AudioContext used by both the waveform visualization and the
// microphone capture pipeline (lazily re-created in visualize() if unset).
var audioCtx = new AudioContext();
// Chromium speech-to-text endpoint.
// NOTE(review): plain-http URL with a hard-coded API key committed to source —
// consider https and loading the key from config.
var sst_url = 'http://www.google.com/speech-api/v2/recognize?client=chromium&lang=en-US&key=AIzaSyCJ6SM4SxRs_CloTipzKQ4N3i8tZjX65cE';
// Alternative endpoint/key; currently unused fallback.
var sst_url_ = 'http://www.google.com/speech-api/v2/recognize?client=chromium&lang=en-US&key=AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw';
// You might want to use this second API url. The key is not mine, however.
// Chatbot backend: GET <chat_api>?text=... returns the bot's reply as text.
var chat_api = 'https://talkingkai-api.herokuapp.com/';
// Please don't use this API in your code. Instead, run your own
//  on Heroku or somewhere else

// Canvas (and its 2D context) where the live waveform is drawn.
var canvas = document.querySelector("canvas");
var canvasCtx = canvas.getContext("2d");
// Element that displays the recognized text ("User said: ...").
var usersaid = document.querySelector("#user");

// Concatenate an array of typed arrays (all of the same kind) into a single
// typed array of that same constructor, preserving order.
//
// ary_of_ary: non-empty array of TypedArrays (e.g. Float32Array chunks).
// Returns: a new TypedArray containing every element of every chunk.
// Note: throws (as before) if ary_of_ary is empty — there is no constructor
// to instantiate in that case.
function merge_array(ary_of_ary) {
    // Sum the lengths with reduce; the old code abused .map purely for its
    // side effect, discarding the mapped array.
    const total_size = ary_of_ary.reduce((sum, ary) => sum + ary.length, 0);
    const total = new ary_of_ary[0].constructor(total_size);
    let offset = 0;
    for (const ary of ary_of_ary) {
        total.set(ary, offset);
        offset += ary.length;
    }
    return total;
}

// This code is from Sole https://soledadpenades.com/
// Draw a live oscilloscope (time-domain waveform) of `stream` onto the
// module-level canvas. Runs forever via requestAnimationFrame.
function visualize(stream) {
  if (!audioCtx) {
    audioCtx = new AudioContext();
  }

  const source = audioCtx.createMediaStreamSource(stream);
  const analyser = audioCtx.createAnalyser();
  analyser.fftSize = 2048;

  const bufferLength = analyser.frequencyBinCount;
  const dataArray = new Uint8Array(bufferLength);

  source.connect(analyser);
  //analyser.connect(audioCtx.destination);

  function draw() {
    const width = canvas.width;
    const height = canvas.height;

    // Schedule the next frame first, then render this one.
    requestAnimationFrame(draw);
    analyser.getByteTimeDomainData(dataArray);

    // Grey background.
    canvasCtx.fillStyle = 'rgb(200, 200, 200)';
    canvasCtx.fillRect(0, 0, width, height);

    // Black 2px trace.
    canvasCtx.lineWidth = 2;
    canvasCtx.strokeStyle = 'rgb(0, 0, 0)';
    canvasCtx.beginPath();

    const sliceWidth = width * 1.0 / bufferLength;
    let x = 0;

    for (let i = 0; i < bufferLength; i++) {
      // Bytes are centered on 128; scale so silence sits at mid-height.
      const y = (dataArray[i] / 128.0) * height / 2;
      if (i === 0) {
        canvasCtx.moveTo(x, y);
      } else {
        canvasCtx.lineTo(x, y);
      }
      x += sliceWidth;
    }

    // Close the trace at the vertical midpoint of the right edge.
    canvasCtx.lineTo(canvas.width, canvas.height / 2);
    canvasCtx.stroke();
  }

  draw();
}

// Global keys: Backspace closes the app, Enter shows the credits dialog.
document.onkeypress = e => {
    switch (e.key) {
        case "Backspace":
            window.close();
            break;
        case "Enter":
            alert("Code by Farooq Karimi Zadeh under ISC:\nhttps://notabug.org/bananaphone/talkingbot\nhttps://github.com/farooqkz/talkingbot_api");
            break;
    }
};


// XHR used to POST recorded audio to the speech-to-text endpoint; the
// response is newline-delimited JSON, one recognition result per line.
var req = new XMLHttpRequest({mozSystem: true});
req.onload = () => {
    for (const line of req.responseText.split("\n")) {
        console.log(line);
        if (line.trim() === "") {
            continue;
        }
        const json = JSON.parse(line);
        // Loose check on purpose: skips both null and undefined.
        if (json.result_index == undefined) {
            console.log("<NOTHING>");
            usersaid.innerText = "<NOTHING>";
            continue;
        }
        // Take the top-ranked transcript of the selected result.
        const alternatives = json.result[json.result_index].alternative;
        const text = alternatives[0].transcript;
        usersaid.innerText = "User said: " + text;
        console.log("User said: " + text);
        // Forward the transcript to the chatbot and speak its reply aloud.
        const chatReq = new XMLHttpRequest({mozSystem: true});
        chatReq.open("GET", chat_api + "?text=" + encodeURIComponent(text), true);
        chatReq.onload = () => {
            const utterance = new SpeechSynthesisUtterance(chatReq.responseText);
            speechSynthesis.speak(utterance);
        };
        chatReq.send();
    }
};

// Microphone capture pipeline. While the Call key (or "5") is held, raw
// float samples are collected; on release they are converted to 16-bit
// signed PCM and POSTed to the speech-to-text endpoint via the global `req`.
navigator.mediaDevices.getUserMedia({audio: true})
    .then(stream => {try{
        console.log("Hmm");
        visualize(stream);
        var source = audioCtx.createMediaStreamSource(stream);
        // NOTE(review): createScriptProcessor is deprecated in favor of
        // AudioWorklet; kept because this app already targets it.
        var scriptNode = audioCtx.createScriptProcessor(0, 1, 1);
        var spokenData = [];
        scriptNode.onaudioprocess = e => {
            console.log("moo moo");
            // BUGFIX: copy the chunk. The browser may reuse the AudioBuffer
            // (and the Float32Array returned by getChannelData) across
            // audioprocess events, so pushing the reference directly could
            // leave every stored chunk aliasing the same latest buffer.
            spokenData.push(new Float32Array(e.inputBuffer.getChannelData(0)));
        };
        // Convert the collected float samples to 16-bit signed PCM and send
        // them to the speech API.
        var process = () => {try{
            if (spokenData.length === 0) {
                return;
            }
            var samples = merge_array(spokenData);
            spokenData = [];
            // Naive decimation: keep every 3rd sample. Assumes a 48 kHz
            // AudioContext so the result is 16 kHz — TODO confirm; no
            // low-pass filter is applied, so some aliasing is possible.
            // 16 bit signed and 16 khz
            var spoken16 = new Int16Array(Math.ceil(samples.length / 3));
            for (var i = 0; i < samples.length; i += 3) {
                // BUGFIX: clamp the float to [-1, 1] BEFORE scaling. The old
                // code capped the scaled value at +32768, which wraps to
                // -32768 when stored in an Int16Array.
                var s = Math.max(-1, Math.min(1, samples[i]));
                spoken16[i / 3] = s < 0 ? s * 32768 : s * 32767;
            }
            req.open("POST", sst_url, true);
            req.setRequestHeader("Content-Type", "audio/l16; rate=16000");
            req.send(spoken16);
            }catch(e){console.log(e);}
        };
        /*
        var options = {
            source: source,
            voice_stop: onstop,
            voice_start: onstart,
            smoothingTimeConstant: 0.9
        };*/
        //var vad = new VAD(options);
        // Push-to-talk: record only while the Call/5 key is held down.
        document.onkeydown = e => {
            if (e.key === "Call" || e.key === "5") {
                source.connect(scriptNode);
                //scriptNode.connect(audioCtx.destination);
            }
        };
        document.onkeyup = e => {
            if (e.key === "Call" || e.key === "5") {
                source.disconnect(scriptNode);
                //scriptNode.disconnect(audioCtx.destination);
                process();
            }
        };
    }catch(e){console.log(e);}});
