"""Gradio demo that transcribes speech with a fine-tuned Whisper model.

Offers two tabs: live microphone capture and audio-file upload. Both feed
the same ASR pipeline and display the transcribed text.
"""

from transformers import pipeline
import gradio as gr

# Whisper-tiny fine-tuned on the MINDS-14 (en-US) intent dataset.
model_id = "mmhamdy/whisper-tiny-finetuned-minds14-en-us"
pipe = pipeline("automatic-speech-recognition", model=model_id)


def transcribe_speech(filepath):
    """Transcribe the audio file at ``filepath``.

    Parameters
    ----------
    filepath : str
        Path to an audio file on disk (Gradio passes a temp-file path
        because both Audio inputs use ``type="filepath"``).

    Returns
    -------
    str
        The transcribed text produced by the ASR pipeline.
    """
    output = pipe(
        filepath,
        max_new_tokens=256,
        generate_kwargs={
            "task": "transcribe",
            "language": "english",
        },
        # Long-form audio is split into 30 s chunks and batched for speed.
        chunk_length_s=30,
        batch_size=8,
    )
    return output["text"]


demo = gr.Blocks()

# NOTE: Gradio 4.x removed `gr.Audio(source=...)` (now a `sources` list)
# and the `gr.outputs` namespace (now plain `gr.Textbox()`); the updated
# calls below work on current Gradio releases.
mic_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs=gr.Textbox(),
)

file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),
    outputs=gr.Textbox(),
)

with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

# Guard the launch so importing this module does not start a server.
if __name__ == "__main__":
    demo.launch(debug=True)