import * as speechCommands from '@tensorflow-models/speech-commands';
import * as tfvis from '@tensorflow/tfjs-vis';
// Base URL of the local static server that hosts the pre-trained
// speech-commands model files (model.json / metadata.json).
const MODEL_URL = 'http://127.0.0.1:8080';
// Transfer-learning recognizer; initialized in window.onload and used by
// every handler below. Undefined until the model has finished loading.
let transferRecognizer;

// Load the pre-trained BROWSER_FFT speech-commands model from the local
// server, then wrap it in a transfer recognizer (named '轮播图') so new
// word examples can be collected and trained on top of the base model.
window.onload = async () => {
    const baseRecognizer = await speechCommands.create(
        'BROWSER_FFT',
        null,
        `${MODEL_URL}/speech/model.json`,
        `${MODEL_URL}/speech/metadata.json`,
    );
    await baseRecognizer.ensureModelLoaded();
    transferRecognizer = baseRecognizer.createTransfer('轮播图');
};
// Record one audio example labelled with the clicked button's text.
// The button text '背景噪音' (background noise) maps to the library's
// special '_background_noise_' label.
// Fix: the original left the button permanently disabled if
// collectExample rejected (e.g. microphone permission denied);
// try/finally now guarantees the button is re-enabled.
window.collect = async (button) => {
    button.disabled = true;
    try {
        const label = button.innerText;
        await transferRecognizer.collectExample(
            label === '背景噪音' ? '_background_noise_' : label
        );
        // Show the running per-label example counts for feedback.
        console.log(transferRecognizer.countExamples());
    } finally {
        button.disabled = false;
    }
};
// Fine-tune the transfer recognizer on the collected examples for 30
// epochs, charting loss/accuracy per epoch via tfjs-vis.
// Fix: the original left the button permanently disabled if train()
// rejected (e.g. no examples collected yet); try/finally now guarantees
// the button is re-enabled.
window.train = async (button) => {
    button.disabled = true;
    try {
        await transferRecognizer.train({
            epochs: 30,
            callback: tfvis.show.fitCallbacks(
                {name: '训练结果'},
                ['loss', 'acc'],
                {callbacks: ['onEpochEnd']}
            )
        });
    } finally {
        button.disabled = false;
    }
};
// Start or stop live microphone recognition. When listening, each result
// above the probability threshold logs the highest-scoring word label.
// NOTE(review): despite the name, this toggles *listening*, not training;
// the name is kept because it is part of the page's public interface.
window.toggleTraining = async (isTraining) => {
    if (!isTraining) {
        transferRecognizer.stopListening();
        return;
    }
    await transferRecognizer.listen((result) => {
        const labels = transferRecognizer.wordLabels();
        const { scores } = result;
        // Argmax over scores (first index wins on ties, matching
        // indexOf(Math.max(...scores)) semantics).
        let best = 0;
        for (let i = 1; i < scores.length; i += 1) {
            if (scores[i] > scores[best]) {
                best = i;
            }
        }
        console.log(labels[best]);
    }, {
        overlapFactor: 0,
        probabilityThreshold: 0.8
    });
};
// Serialize every collected example to a binary blob and trigger a
// browser download named 'data.bin'.
// Fix: the original never revoked the object URL, leaking the blob's
// memory for the lifetime of the page; it is now released after the
// click has been dispatched.
window.save = async () => {
    const arrayBuffer = transferRecognizer.serializeExamples();
    const blob = new Blob([arrayBuffer]);
    const url = URL.createObjectURL(blob);
    const link = document.createElement('a');
    link.href = url;
    link.download = 'data.bin';
    link.click();
    // Defer revocation one tick so the download has started before the
    // URL becomes invalid.
    setTimeout(() => URL.revokeObjectURL(url), 0);
};