// Feature-detect the Web Speech API. Chrome/Edge expose these constructors
// only under the `webkit` prefix, while other engines may expose the
// standard (unprefixed) names. Reading them as properties of `globalThis`
// — instead of as bare identifiers — avoids a ReferenceError on platforms
// where the prefixed globals do not exist at all (the original code threw
// immediately on such browsers).
const SpeechRecognition =
    globalThis.SpeechRecognition || globalThis.webkitSpeechRecognition;
const SpeechGrammarList =
    globalThis.SpeechGrammarList || globalThis.webkitSpeechGrammarList;
const SpeechRecognitionEvent =
    globalThis.SpeechRecognitionEvent || globalThis.webkitSpeechRecognitionEvent;


// Single shared recognizer instance; event handlers are attached below and
// recognition is started from the page-wide click handler.
const recognition = new SpeechRecognition();
// A grammar list is constructed but never attached — the commented lines
// below are kept as a reference for how a grammar would be wired in.
// NOTE(review): `grammar` is not defined anywhere in this file; the
// commented call would need a JSGF grammar string to work — confirm source.
const speechRecognitionList = new SpeechGrammarList();
// speechRecognitionList.addFromString(grammar, 1);
// recognition.grammars = speechRecognitionList;


// Any click on the page kicks off a recognition session.
document.body.onclick = () => {
    recognition.start();
    console.log('Ready to receive a color command.');
};

recognition.onresult = function(event) {
    // event.results is a SpeechRecognitionResultList (array-like) of
    // SpeechRecognitionResult objects; each result is itself array-like,
    // holding SpeechRecognitionAlternative objects ranked by confidence.
    // [0] is the top-ranked alternative, whose `transcript` and
    // `confidence` we log.
    //
    // Fix: read the LAST result instead of index 0. The original comments
    // (and the commented-out reference code) describe the `[last]` pattern,
    // but the live code read `results[0]`, which would report a stale
    // result whenever the list holds more than one entry (e.g. if
    // `continuous` mode is ever enabled).
    const last = event.results.length - 1;
    const best = event.results[last][0];

    // diagnostic.textContent = 'Result received: ' + color + '.';
    // bg.style.backgroundColor = color;
    console.log('识别到: ' + best.transcript, '正确率：' + best.confidence);
};

// When the user goes quiet, stop the session so a final result is delivered.
recognition.onspeechend = () => {
    console.log('stop speech');
    recognition.stop();
};

// Fired when speech was heard but could not be matched to a recognizable phrase.
recognition.onnomatch = (event) => {
    console.log("I didn't recognise that color.");
};

// Report recognition failures (e.g. 'no-speech', 'not-allowed') to the console.
recognition.onerror = (event) => {
    console.log(`Error occurred in recognition: ${event.error}`);
};