Spaces: Runtime error
import torch
import gradio as gr
from transformers import pipeline

model_id = "openai/whisper-large"  # Using the model you provided

# Run on a GPU if one is available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("automatic-speech-recognition", model=model_id, device=device)
title = "Automatic Speech Recognition"
description = ""
def transcribe_speech(filepath):
    output = pipe(
        filepath,
        generate_kwargs={
            "task": "transcribe",
            "language": "english",  # Set to English
        },
    )
    return output["text"]
file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),  # "source" became "sources" (a list) in Gradio 4.x
    outputs=gr.Textbox(),  # gr.outputs was removed in Gradio 4.x; use gr.Textbox directly
    examples=[["./example.wav"]],
    title=title,
    description=description,
)
file_transcribe.launch()
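
If the Space still shows a runtime error after these changes, the installed library versions are the usual suspect: gr.Audio(source=...) and gr.outputs.Textbox only work on older Gradio releases and were removed in Gradio 4.x. A minimal sketch for surfacing the versions in the Space logs (assuming only the standard gradio and transformers packages are installed) is:

import gradio as gr
import transformers

# Print the library versions at startup so API mismatches are easy to spot in the logs.
print(f"gradio={gr.__version__}, transformers={transformers.__version__}")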