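// Browser-side speech-to-text demo: records microphone audio and transcribes it with
// Whisper (Xenova/whisper-tiny.en) running in the browser via transformers.js.
// Assumptions: the page exposes the library as window.Transformers (for example by
// importing @xenova/transformers and assigning it to that global) and contains elements
// with the ids modelStatus, status, output, error, recordButton and testModel.
// An illustrative page scaffold is sketched at the bottom of this file.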
let transcriber;        // transformers.js ASR pipeline, set once the model has loaded
let recording = false;  // true while the microphone is being captured
let mediaRecorder;      // MediaRecorder instance for the current session
let audioChunks = [];   // recorded audio chunks collected by ondataavailable
// Load the Whisper model
async function loadModel() {
    document.getElementById("modelStatus").innerText = "⏳ Loading Whisper model...";
    try {
        // Race the pipeline download against a 60-second timeout so the UI never hangs silently.
        const timeout = new Promise((_, reject) =>
            setTimeout(() => reject(new Error("Model took too long to load!")), 60000)
        );
        transcriber = await Promise.race([
            window.Transformers.pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'),
            timeout
        ]);
        document.getElementById("modelStatus").innerText = "✅ Model Loaded!";
        document.getElementById("recordButton").disabled = false;
        document.getElementById("recordButton").innerText = "🎤 Start Recording";
        document.getElementById("testModel").disabled = false;
    } catch (error) {
        document.getElementById("modelStatus").innerText = "❌ Model failed to load!";
        document.getElementById("error").innerText = error.message;
        console.error("Error loading model:", error);
    }
}
// Test the model against a known sample clip (JFK speech from the transformers.js docs dataset)
async function testModel() {
    try {
        document.getElementById("status").innerText = "⏳ Running test...";
        let output = await transcriber("https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav");
        // The pipeline resolves to an object whose .text property holds the transcript.
        document.getElementById("output").innerText = "Test Passed: " + output.text;
        document.getElementById("status").innerText = "✅ Model Test Passed!";
    } catch (error) {
        document.getElementById("status").innerText = "❌ Model Test Failed!";
        document.getElementById("error").innerText = error.message;
        console.error("Test Error:", error);
    }
}
// Start capturing microphone audio
async function startRecording() {
    let stream;
    try {
        stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    } catch (error) {
        // Microphone access was denied or is unavailable.
        document.getElementById("status").innerText = "❌ Could not access the microphone.";
        document.getElementById("error").innerText = error.message;
        console.error("getUserMedia error:", error);
        return;
    }
    audioChunks = [];
    mediaRecorder = new MediaRecorder(stream);
    mediaRecorder.ondataavailable = (event) => {
        audioChunks.push(event.data);
    };
    mediaRecorder.onstop = async () => {
        // MediaRecorder typically produces WebM/Ogg rather than WAV; use its reported MIME type.
        let audioBlob = new Blob(audioChunks, { type: mediaRecorder.mimeType || 'audio/webm' });
        let reader = new FileReader();
        reader.onloadend = async () => {
            // Pass the recording to the pipeline as a data URL.
            let audioURL = reader.result;
            document.getElementById("status").innerText = "⏳ Transcribing...";
            try {
                let output = await transcriber(audioURL);
                document.getElementById("output").innerText = output.text;
                document.getElementById("status").innerText = "✅ Done!";
            } catch (error) {
                document.getElementById("status").innerText = "❌ Error during transcription.";
                document.getElementById("error").innerText = error.message;
                console.error(error);
            }
        };
        reader.readAsDataURL(audioBlob);
    };
    mediaRecorder.start();
    recording = true;
    document.getElementById("recordButton").innerText = "⏹ Stop Recording";
    document.getElementById("status").innerText = "🎙️ Recording...";
}
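// Optional alternative (a sketch, not wired in above): instead of a data URL, decode the
// recorded Blob to raw 16 kHz mono samples and pass the resulting Float32Array to the
// transcriber, which the transformers.js ASR pipeline also accepts as input.
async function blobToSamples(audioBlob) {
    const arrayBuffer = await audioBlob.arrayBuffer();
    // A 16 kHz context makes decodeAudioData resample to the rate Whisper expects.
    const audioContext = new AudioContext({ sampleRate: 16000 });
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
    return audioBuffer.getChannelData(0); // mono Float32Array of samples
}
// Usage: let output = await transcriber(await blobToSamples(audioBlob));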
// Stop recording; this fires mediaRecorder.onstop, which kicks off transcription
function stopRecording() {
    if (mediaRecorder && recording) {
        mediaRecorder.stop();
        recording = false;
        document.getElementById("recordButton").innerText = "🎤 Start Recording";
        document.getElementById("status").innerText = "⏳ Processing audio...";
    }
}
// Attach event listeners
document.getElementById("recordButton").addEventListener("click", () => {
    if (!recording) {
        startRecording();
    } else {
        stopRecording();
    }
});
document.getElementById("testModel").addEventListener("click", testModel);
// Load model on page start
loadModel();
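/*
Illustrative page scaffold (an assumption, not part of this script — the file name, CDN
and version below are placeholders; adjust them to your setup). It provides the element
ids referenced above and a window.Transformers global exposing pipeline():

<div id="modelStatus"></div>
<div id="status"></div>
<div id="output"></div>
<div id="error"></div>
<button id="recordButton" disabled>Loading model...</button>
<button id="testModel" disabled>Test Model</button>

<script type="module">
  import * as Transformers from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2';
  window.Transformers = Transformers;
</script>
<!-- Load this file as a module too, so it runs after the import above has completed. -->
<script type="module" src="app.js"></script>
*/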