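// Browser-side speech-to-text demo: records microphone audio with MediaRecorder
// and transcribes it with a Whisper model running locally via Transformers.js.
// The script assumes the host page provides these elements (all referenced below):
//   #modelStatus, #status, #output, #error  — text targets for progress/results
//   #recordButton, #testModel               — buttons wired up at the bottom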
let transcriber;
let recording = false;
let mediaRecorder;
let audioChunks = [];
// Load the Whisper model; Promise.race enforces a 60s ceiling so the UI
// never hangs on a stalled download.
async function loadModel() {
    document.getElementById("modelStatus").innerText = "⏳ Loading Whisper model...";
    try {
        const timeout = new Promise((_, reject) =>
            setTimeout(() => reject(new Error("Model took too long to load!")), 60000) // 60s timeout
        );
        transcriber = await Promise.race([
            window.Transformers.pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'),
            timeout
        ]);
        document.getElementById("modelStatus").innerText = "✅ Model Loaded!";
        document.getElementById("recordButton").disabled = false;
        document.getElementById("recordButton").innerText = "🎤 Start Recording";
        document.getElementById("testModel").disabled = false;
    } catch (error) {
        document.getElementById("modelStatus").innerText = "❌ Model failed to load!";
        document.getElementById("error").innerText = error.message;
        console.error("Error loading model:", error);
    }
}
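// Note on the pattern above: Promise.race only decides which result is used;
// the timeout does not cancel the model download, which continues in the
// background. Also, `window.Transformers` assumes an earlier <script> tag
// exposed the library under that global; the library's documented ESM form
// is `import { pipeline } from '@xenova/transformers'`.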
// Test the model against a known-good hosted sample clip.
async function testModel() {
    try {
        document.getElementById("status").innerText = "⏳ Running test...";
        let output = await transcriber("https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav");
        document.getElementById("output").innerText = "Test Passed: " + output.text;
        document.getElementById("status").innerText = "✅ Model Test Passed!";
    } catch (error) {
        document.getElementById("status").innerText = "❌ Model Test Failed!";
        document.getElementById("error").innerText = error.message;
        console.error("Test Error:", error);
    }
}
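// For this pipeline the result is an object with a `text` field, e.g. roughly
// { text: " And so my fellow Americans..." } for the JFK sample above.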
// Start capturing microphone audio; transcription runs once recording stops.
async function startRecording() {
    let stream;
    try {
        stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    } catch (error) {
        // Permission denied or no input device: surface it instead of failing silently.
        document.getElementById("status").innerText = "❌ Microphone access failed.";
        document.getElementById("error").innerText = error.message;
        console.error("getUserMedia error:", error);
        return;
    }
    mediaRecorder = new MediaRecorder(stream);
    mediaRecorder.ondataavailable = (event) => {
        audioChunks.push(event.data);
    };
    mediaRecorder.onstop = async () => {
        // MediaRecorder usually emits WebM/Opus rather than WAV; the MIME label here
        // is cosmetic, since the pipeline decodes the data URL with the Web Audio API.
        let audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
        let reader = new FileReader();
        reader.onloadend = async () => {
            let audioURL = reader.result; // base64 data URL of the recording
            document.getElementById("status").innerText = "⏳ Transcribing...";
            try {
                let output = await transcriber(audioURL);
                document.getElementById("output").innerText = output.text;
                document.getElementById("status").innerText = "✅ Done!";
            } catch (error) {
                document.getElementById("status").innerText = "❌ Error during transcription.";
                document.getElementById("error").innerText = error.message;
                console.error(error);
            }
        };
        reader.readAsDataURL(audioBlob);
    };
    audioChunks = []; // reset before starting so stale chunks from a previous take are dropped
    mediaRecorder.start();
    recording = true;
    document.getElementById("recordButton").innerText = "⏹ Stop Recording";
    document.getElementById("status").innerText = "🎙️ Recording...";
}
// Stop the recorder; the onstop handler above then kicks off transcription.
function stopRecording() {
    if (mediaRecorder && recording) {
        mediaRecorder.stop();
        recording = false;
        document.getElementById("recordButton").innerText = "🎤 Start Recording";
        document.getElementById("status").innerText = "⏳ Processing audio...";
    }
}
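// MediaRecorder flushes any buffered audio in one final `dataavailable` event
// before firing `stop`, so audioChunks is complete by the time onstop runs.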
// Attach event listeners
document.getElementById("recordButton").addEventListener("click", () => {
    if (!recording) {
        startRecording();
    } else {
        stopRecording();
    }
});
document.getElementById("testModel").addEventListener("click", testModel);
// Load model on page start
loadModel();