"""Gradio app that transcribes an audio file to text using OpenAI Whisper."""

from functools import lru_cache

import whisper
import gradio as gr


@lru_cache(maxsize=1)
def _load_model(name: str = "base"):
    """Load a Whisper model once and cache it.

    whisper.load_model is expensive (disk read + weight initialization);
    the original code reloaded it on every transcription request.
    """
    return whisper.load_model(name)


def get_text_from_mp3_whisper(mp3_file: str) -> tuple:
    """Transcribe *mp3_file* with Whisper.

    Args:
        mp3_file: Path to the audio file to transcribe.

    Returns:
        A 2-tuple ``(text, segments)`` — the full transcript string and the
        list of timestamped segment dicts produced by Whisper.
        (The original annotation claimed ``-> str``, but two values are
        returned.)
    """
    result = _load_model().transcribe(mp3_file)
    # Bug fix: "segments" is a list of segment dicts in Whisper's result;
    # the original defaulted it to {} — use [] for a consistent type.
    return result.get("text", "No text found"), result.get("segments", [])


def main() -> None:
    """Build and launch the Gradio UI."""
    gr.Interface(
        title="OpenAI Whisper Transcribe audio files to text",
        fn=get_text_from_mp3_whisper,
        # NOTE(review): gr.inputs.* is the deprecated legacy namespace
        # (removed in Gradio 4); on Gradio >= 3 this should be
        # gr.Audio(type="filepath") — confirm the installed version.
        inputs=[gr.inputs.Audio(type="filepath")],
        outputs=["textbox", "json"],
        live=True,
    ).launch()


if __name__ == "__main__":
    main()