"""Gradio web UI for Armenian speech-to-text using OpenAI Whisper.

Records audio from the microphone and transcribes it with a locally
loaded Whisper model.
"""
import time

import gradio as gr
import whisper

# Whisper checkpoint to load — see the model table in the Whisper README
# for the available sizes (tiny ... large-v2) and their VRAM requirements.
MODEL_NAME = "large-v2"

# Transcription language (ISO 639-1); "hy" = Armenian.
LANGUAGE = "hy"

# Loaded once at module import so each request reuses the same model.
model = whisper.load_model(MODEL_NAME)


def speech_to_text(audio):
    """Transcribe a recorded audio clip to text.

    Args:
        audio: Path to the recorded audio file supplied by the Gradio
            microphone component, or None when nothing was recorded.

    Returns:
        The transcribed text, or an empty string when no audio is given.
    """
    if audio is None:
        return ""
    # Brief pause so live mode does not fire on a half-finished recording.
    # NOTE(review): presumably a debounce workaround — confirm it is still
    # needed with the Gradio version in use.
    time.sleep(1)
    waveform = whisper.load_audio(audio)
    # Whisper expects fixed 30-second windows; pad or trim accordingly.
    waveform = whisper.pad_or_trim(waveform)
    result = model.transcribe(waveform, language=LANGUAGE)
    return result["text"]


# Backward-compatible alias: keep the original public name callable.
SpeechToText = speech_to_text


def main():
    """Build and launch the Gradio interface."""
    gr.Interface(
        title="OpenAI Whisper implementation on Gradio Web UI",
        fn=speech_to_text,
        inputs=[
            gr.Audio(source="microphone", type="filepath"),
        ],
        outputs=[
            "textbox",
        ],
        live=True,
    ).launch(debug=False)


if __name__ == "__main__":
    main()