stt_test_02 / app.py
import torch
import gradio as gr
from transformers import pipeline

# OpenAI's Whisper large checkpoint, loaded through the transformers ASR pipeline.
model_id = "openai/whisper-large"

# Use the first GPU when one is available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("automatic-speech-recognition", model=model_id, device=device)
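
# Note: if long recordings should be split into fixed-size windows, the
# transformers ASR pipeline also accepts a chunk_length_s argument; a possible
# variant (an assumption, not what this file uses) would be:
#
#   pipe = pipeline("automatic-speech-recognition", model=model_id, chunk_length_s=30)
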
title = "Automatic Speech Recognition"
description = """
"""
def transcribe_speech(filepath):
    """Transcribe the audio file at `filepath` and return the English text."""
    output = pipe(
        filepath,
        generate_kwargs={
            "task": "transcribe",   # transcribe in the source language (no translation)
            "language": "english",  # decode as English
        },
    )
    return output["text"]
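
# Quick local sanity check, bypassing the UI (assumes ./example.wav is present
# in the repo, as the examples entry below suggests):
#
#   print(transcribe_speech("./example.wav"))
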
file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),  # uploaded audio, passed as a file path
    outputs=gr.Textbox(label="Transcription"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)
file_transcribe.launch()
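
# When run outside a hosted Space, a temporary public link can be requested
# instead (an optional variant, not used here):
#
#   file_transcribe.launch(share=True)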