Spaces: Runtime error
import torch
import torchaudio
import gradio as gr
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# Load the fine-tuned Urdu model and its processor from the Hugging Face Hub
model_name = "kingabzpro/wav2vec2-large-xls-r-300m-Urdu"
model = Wav2Vec2ForCTC.from_pretrained(model_name)
processor = Wav2Vec2Processor.from_pretrained(model_name)
# Define the transcribe function
def transcribe(audio):
    # Gradio passes a file path; load it and resample to the 16 kHz rate the model was trained on
    waveform, sample_rate = torchaudio.load(audio, normalize=True)
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
    # Collapse to mono and pass the raw 1-D waveform along with its sampling rate
    input_dict = processor(waveform.mean(dim=0).numpy(), sampling_rate=16000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(input_dict.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1).squeeze()
    transcription = processor.decode(predicted_ids)
    return transcription
# Define the interface (gr.inputs/gr.outputs were removed in Gradio 4; use gr.Audio and gr.Textbox)
audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Speak or Upload Audio")
text_output = gr.Textbox(label="Transcription")
interface = gr.Interface(fn=transcribe, inputs=audio_input, outputs=text_output, title="Speech Recognition")
interface.launch()
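
If the Space still reports a runtime error after updating the Gradio calls, it can help to reproduce the inference path outside the UI. The sketch below is a minimal, self-contained check under two assumptions: the same model name as above, and a hypothetical local test clip named sample_urdu.wav. It runs the identical processor/model pipeline from the command line so UI problems can be separated from model problems.

# Minimal sketch: run the same wav2vec2 pipeline without Gradio to isolate UI errors
# from model errors. "sample_urdu.wav" is a hypothetical local test file.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_name = "kingabzpro/wav2vec2-large-xls-r-300m-Urdu"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

waveform, sr = torchaudio.load("sample_urdu.wav")  # hypothetical test clip
if sr != 16000:
    waveform = torchaudio.functional.resample(waveform, sr, 16000)

inputs = processor(waveform.mean(dim=0).numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    predicted_ids = torch.argmax(model(inputs.input_values).logits, dim=-1).squeeze()
print(processor.decode(predicted_ids))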