from faster_whisper import WhisperModel
import gradio as gr

model_size = "small"

# Run on GPU with FP16:
# model = WhisperModel(model_size, device="cuda", compute_type="float16")
# Run on CPU with INT8 quantization
model = WhisperModel(model_size, device="cpu", compute_type="int8")

def transcribe(audio):
    # Transcribe the recorded audio file and concatenate the segment texts
    segments, _ = model.transcribe(audio, beam_size=5)
    return "".join(segment.text for segment in segments)

gr.Interface(
    title="Fast Whisper for Speech Recognition",
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath")
    ],
    outputs=[
        "textbox"
    ],
).launch()
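
# To try it out, run this script and open the local URL Gradio prints
# (http://127.0.0.1:7860 by default), then record from the microphone.
# A minimal sketch for testing transcribe() without the web UI, assuming a
# local audio file named "sample.wav" exists (the filename is a placeholder):
#   print(transcribe("sample.wav"))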