import gradio as gr
from transformers import pipeline

# Speech-to-text pipeline; swap in the fine-tuned Polish model below if needed.
# transcriber = pipeline("automatic-speech-recognition", model="filipzawadka/whisper-small-pl-2")
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-small.en")


def transcribe(audio):
    # `audio` is a path to the recorded file because the Audio input uses type="filepath".
    return transcriber(audio)["text"]


demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)

demo.launch()
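
# --- Alternative sketch (commented out, not part of the original script) ---
# With type="numpy", Gradio passes (sample_rate, data) instead of a file path.
# The transformers ASR pipeline also accepts a dict of the form
# {"sampling_rate": ..., "raw": ...}, where `raw` is a float32 waveform, so the
# int16 samples are converted and normalized first. The name `transcribe_numpy`
# is illustrative only.
#
# import numpy as np
#
# def transcribe_numpy(audio):
#     sr, y = audio
#     y = y.astype(np.float32)
#     y /= np.max(np.abs(y))  # normalize to [-1, 1]
#     return transcriber({"sampling_rate": sr, "raw": y})["text"]
#
# demo = gr.Interface(
#     fn=transcribe_numpy,
#     inputs=gr.Audio(sources=["microphone"], type="numpy"),
#     outputs="text",
# )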