import gradio as gr
from transformers import pipeline
import torch  # backend for the transformers pipeline
import librosa
import soundfile  # audio I/O backend used by librosa

SAMPLE_RATE = 16000

# Load the fine-tuned Whisper checkpoint as an ASR pipeline.
pipe = pipeline(model="birgermoell/whisper-small-sv-bm")  # change to "your-username/the-name-you-picked"


def process_audio_file(file):
    """Load an audio file, resample to 16 kHz and downmix to mono for Whisper."""
    data, sr = librosa.load(file)
    if sr != SAMPLE_RATE:
        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
    # monochannel
    data = librosa.to_mono(data)
    return data


def transcribe(Microphone, File_Upload):
    warn_output = ""
    if (Microphone is not None) and (File_Upload is not None):
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )
        file = Microphone
    elif (Microphone is None) and (File_Upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"
    elif Microphone is not None:
        file = Microphone
    else:
        file = File_Upload

    audio_data = process_audio_file(file)
    text = pipe(audio_data)["text"]

    return warn_output + text


# Gradio 3.x-style components (gr.inputs, source=, optional=); later Gradio
# releases replaced gr.inputs.Audio(source=...) with gr.Audio(sources=[...]).
iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="Whisper Small SV",
    description=(
        "Demo for Swedish speech recognition using the "
        "[Whisper Small SV BM checkpoint](https://huggingface.co/birgermoell/whisper-small-sv-bm)."
    ),
    allow_flagging="never",
)
iface.launch(enable_queue=True)
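
# --- Optional local sanity check (a sketch, not part of the original demo) ---
# "sample.wav" below is a hypothetical placeholder file. Running these two lines
# before iface.launch() exercises the same preprocessing + transcription path as
# the Gradio app without opening the UI:
#
#     audio = process_audio_file("sample.wav")
#     print(pipe(audio)["text"])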