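// Vue 3 + Hugging Face Inference demo: transcribe sample audio clips with a selectable ASR model.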
import { HfInference } from "https://cdn.skypack.dev/@huggingface/inference@latest";
// Vue is assumed to be provided as a global by a separate <script> tag, so it is destructured rather than imported.
const { createApp, ref, onMounted, computed, watch } = Vue;
const app = createApp({
  setup() {
    const token = ref(localStorage.getItem("token") || "");
    const models = ref([
      "openai/whisper-tiny",
      "facebook/wav2vec2-large-960h-lv60-self",
      "openai/whisper-large-v2",
    ]);
    const selectedAudio = ref("clear-audio-1.wav");
    const selectedModel = ref("");
    const loading = ref(false);
    const didErrorOccur = ref(false);
    const audioFiles = ref([
      "clear-audio-1.wav",
      "clear-audio-2.wav",
      "unclear-audio-1.wav",
      "unclear-audio-2.wav",
    ]);
    const recognizedText = ref("");
    const statusMessage = computed(() => {
      if (loading.value) return "Loading...";
      return "Ready";
    });
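    // Fetch the selected audio clip and run it through the selected ASR model.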
    const run = async () => {
      reset();
      loading.value = true;
      try {
        const hf = new HfInference(token.value);
        // Download the selected sample clip as an ArrayBuffer for the ASR request.
        const audioData = await (await fetch(selectedAudio.value)).arrayBuffer();
        const result = await hf.automaticSpeechRecognition({
          data: audioData,
          model: selectedModel.value,
        });
        console.log(result);
        recognizedText.value = result.text;
      } catch (e) {
        console.error(e);
        didErrorOccur.value = true;
      } finally {
        loading.value = false;
      }
    };
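    // Clear any previous result or error state.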
    const reset = () => {
      didErrorOccur.value = false;
      loading.value = false;
      recognizedText.value = "";
    };
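    // Reset the output whenever the audio clip or model selection changes.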
    watch(selectedAudio, reset);
    watch(selectedModel, reset);
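    // Restore a saved token (if any) and default to the first model on mount.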
    onMounted(() => {
      const localStorageToken = localStorage.getItem("token");
      if (localStorageToken) {
        token.value = localStorageToken;
      }
      selectedModel.value = models.value[0];
    });
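    // Expose state and actions to the template.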
    return {
      token,
      run,
      audioFiles,
      selectedAudio,
      models,
      selectedModel,
      loading,
      didErrorOccur,
      statusMessage,
      recognizedText,
    };
  },
});
app.mount("#app");