import torch
import torchaudio
import gradio as gr
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Load the model and processor
model_name = "kingabzpro/wav2vec2-large-xlsr-300m-urdu"
model = Wav2Vec2ForCTC.from_pretrained(model_name)
processor = Wav2Vec2Processor.from_pretrained(model_name)

# Define the transcribe function
def transcribe(audio_path):
    # With type="filepath", Gradio passes the uploaded file's path as a string
    waveform, sample_rate = torchaudio.load(audio_path)
    # Downmix to mono and resample to the 16 kHz rate wav2vec2 models expect
    waveform = waveform.mean(dim=0)
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
    input_dict = processor(
        waveform.numpy(), sampling_rate=16000, return_tensors="pt", padding=True
    )
    # Inference only, so skip gradient tracking
    with torch.no_grad():
        logits = model(input_dict.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1).squeeze()
    transcription = processor.decode(predicted_ids)
    return transcription

# Define the interface (gr.inputs/gr.outputs are deprecated; this uses the current Gradio component API)
audio_input = gr.Audio(sources=["upload"], type="filepath", label="Upload audio file")
text_output = gr.Textbox(label="Transcription")
interface = gr.Interface(
    fn=transcribe,
    inputs=audio_input,
    outputs=text_output,
    title="Urdu Speech Recognition",
)
interface.launch(share=True)
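
# A minimal sketch of exercising the pipeline without launching the UI,
# assuming a local audio file at a hypothetical path "sample_urdu_clip.wav"
# (any format torchaudio can load; the function resamples to 16 kHz itself):
#
#     print(transcribe("sample_urdu_clip.wav"))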