import torch
import torchaudio
import gradio as gr
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Load the fine-tuned Urdu wav2vec2 CTC model and its matching processor
# (feature extractor + tokenizer) from the Hugging Face Hub.
# NOTE: downloads weights on first run, then uses the local cache.
model_name = "kingabzpro/wav2vec2-large-xls-r-300m-Urdu"
model = Wav2Vec2ForCTC.from_pretrained(model_name)
processor = Wav2Vec2Processor.from_pretrained(model_name)

# Define the transcribe function
def transcribe(audio):
    """Transcribe speech audio to Urdu text with the wav2vec2 CTC model.

    Args:
        audio: Either a filesystem path to an audio file, or a
            ``(sample_rate, numpy_array)`` tuple as delivered by a Gradio
            Audio component configured with ``type="numpy"``.

    Returns:
        str: The greedy (argmax) CTC decoding of the audio.
    """
    target_rate = 16000  # XLS-R wav2vec2 checkpoints are trained on 16 kHz audio

    if isinstance(audio, tuple):
        # Gradio `type="numpy"` input: (sample_rate, samples ndarray).
        sample_rate, samples = audio
        waveform = torch.as_tensor(samples, dtype=torch.float32)
        if samples.dtype.kind in "iu":
            # Integer PCM (e.g. int16 microphone capture) -> floats in [-1, 1].
            waveform = waveform / float(2 ** (8 * samples.dtype.itemsize - 1))
        if waveform.dim() == 1:
            waveform = waveform.unsqueeze(0)  # -> (channels, frames)
        elif waveform.shape[0] > waveform.shape[1]:
            # presumably Gradio delivers (frames, channels) — transpose to
            # torchaudio's (channels, frames) layout. TODO confirm per version.
            waveform = waveform.t()
    else:
        # Filepath (or file-like) input: let torchaudio decode and normalize.
        waveform, sample_rate = torchaudio.load(audio, normalize=True)

    waveform = waveform.mean(dim=0)  # downmix multi-channel audio to mono

    if sample_rate != target_rate:
        # The model only understands 16 kHz input; resample anything else.
        waveform = torchaudio.functional.resample(waveform, sample_rate, target_rate)

    input_dict = processor(
        waveform, sampling_rate=target_rate, return_tensors="pt", padding=True
    )
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        logits = model(input_dict.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1).squeeze()
    transcription = processor.decode(predicted_ids)
    return transcription

# Define the interface.
# `type="filepath"` hands transcribe() a path on disk that torchaudio.load
# can open; the previous `type="numpy"` passed a (sample_rate, ndarray)
# tuple, which torchaudio.load cannot read and crashed every request.
audio_input = gr.inputs.Audio(source="microphone", type="filepath", label="Speak or Upload Audio")
text_output = gr.outputs.Textbox(label="Transcription")

# Wire the transcription function to the UI and start the local web server
# (blocks until the server is stopped).
interface = gr.Interface(fn=transcribe, inputs=audio_input, outputs=text_output, title="Speech Recognition")
interface.launch()