Spaces:
Runtime error
Runtime error
File size: 1,484 Bytes
1c09abb 6cced05 c8e1c36 1c09abb 6cced05 1c09abb 6cced05 1c09abb 7499088 6cced05 b6c0e77 3a4ad15 69f6d78 2e74d79 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
import gradio as gr
import nemo.collections.asr as nemo_asr
from pydub import AudioSegment
import pyaudioconvert as pac
# Load the pretrained Kinyarwanda Conformer-Transducer (RNNT BPE) ASR model
# from the Hugging Face hub; weights are downloaded on first run, so startup
# can be slow and requires network access.
hf_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained(
model_name="mbazaNLP/Kinyarwanda_nemo_stt_conformer_model")
def convert(audio):
    """Normalize an uploaded audio file to 16-bit mono WAV, in place.

    MP3 and OGG inputs are first re-encoded to WAV (written back over the
    same path, which keeps its original extension); the file is then forced
    to 16-bit mono PCM, the sample format the ASR model expects.

    Args:
        audio: uploaded-file object exposing a ``.name`` filesystem path.

    Returns:
        bool: True on success, False if the extension is not mp3/wav/ogg.
    """
    file_name = audio.name
    # Check dotted suffixes so a dot-less name like "formp3" is rejected.
    if file_name.endswith(".mp3"):
        sound = AudioSegment.from_mp3(file_name)
        sound.export(file_name, format="wav")  # overwrite source path in place
    elif file_name.endswith(".ogg"):
        sound = AudioSegment.from_ogg(file_name)
        sound.export(file_name, format="wav")
    elif not file_name.endswith(".wav"):
        return False  # unsupported format; caller reports the error
    # Force 16-bit mono, which the NeMo model requires.
    pac.convert_wav_to_16bit_mono(file_name, file_name)
    return True
def transcribe(audio, audio_microphone):
    """Transcribe an uploaded or microphone-recorded audio clip.

    The microphone recording takes precedence when both inputs are given.

    Args:
        audio: uploaded file object from Gradio, or None.
        audio_microphone: microphone recording file object, or None.

    Returns:
        str: the recognized text, or a human-readable error message.
    """
    audio = audio_microphone if audio_microphone else audio
    # Both inputs are optional in the UI; guard against neither being set
    # (the original crashed with AttributeError on audio.name here).
    if audio is None:
        return "Please upload an audio file or record from the microphone"
    # convert() returns False for unsupported extensions.
    if not convert(audio):
        return "The format must be mp3, wav or ogg"
    # NeMo's transcribe takes a list of paths and returns a list of results.
    result = hf_model.transcribe([audio.name])
    return result[0]
# Wire up the Gradio web UI: two optional audio inputs (file upload and
# browser microphone) feeding transcribe(), one text output for the result.
# NOTE(review): gr.inputs / gr.outputs namespaces and the `optional`,
# `source`, `type="file"` and `enable_queue` arguments are the legacy
# Gradio (pre-4.x) API — confirm the pinned gradio version before upgrading.
gradio_ui = gr.Interface(
fn=transcribe,
title="Kinyarwanda Speech Recognition",
description="Upload an audio clip or record from browser using microphone, and let AI do the hard work of transcribing.",
inputs=[gr.inputs.Audio(label="Upload Audio File", type="file", optional=True), gr.inputs.Audio(source="microphone", type="file", optional=True, label="Record from microphone")],
outputs=[gr.outputs.Textbox(label="Recognized speech")]
)
# Queueing serializes requests so long transcriptions don't time out.
gradio_ui.launch(enable_queue=True)
|