"""Gradio demo: transcribe Polish speech recorded from the microphone.

The browser microphone recording is saved to a temporary file by Gradio
(``type="filepath"``) and run through a fine-tuned Whisper-small ASR
pipeline; the recognized text is shown in a text output.
"""

import gradio as gr
import numpy as np  # noqa: F401 -- kept from original; not used directly here
from transformers import pipeline

# Polish fine-tune of Whisper-small.
# English alternative model id: "openai/whisper-small.en"
transcriber = pipeline(
    "automatic-speech-recognition",
    model="filipzawadka/whisper-small-pl-2",
)


def transcribe(audio):
    """Return the transcription of the recorded audio clip.

    Parameters
    ----------
    audio : str | None
        Filesystem path to the recording, as supplied by the
        ``gr.Audio(type="filepath")`` component. ``None`` when the user
        submitted without recording anything.

    Returns
    -------
    str
        The recognized text; empty string when no audio was provided.
    """
    # BUG FIX: the original ignored `audio` and always transcribed a
    # hard-coded local mp3 ("./common_voice_pl_38051254.mp3"), so every
    # recording produced the same text and the app crashed wherever that
    # file did not exist.
    if audio is None:
        return ""
    return transcriber(audio)["text"]


demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)

# Launched at import/run time, matching the original script's behavior.
demo.launch()