import torch
from transformers import pipeline
import gradio as gr

MODEL_NAME = "JackismyShephard/whisper-tiny-finetuned-minds14"
BATCH_SIZE = 8

device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
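
# Note: chunk_length_s=30 makes the pipeline split long recordings into
# 30-second windows and stitch the partial transcriptions back together, so
# audio of arbitrary length can be handled. BATCH_SIZE controls how many of
# those windows are transcribed per forward pass, and device=0 selects the
# first CUDA GPU when one is available (falling back to CPU otherwise).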


def format_timestamp(
    seconds: float, always_include_hours: bool = False, decimal_marker: str = "."
):
    """Format a time offset in seconds as an [HH:]MM:SS.mmm-style string."""
    if seconds is not None:
        milliseconds = round(seconds * 1000.0)

        hours = milliseconds // 3_600_000
        milliseconds -= hours * 3_600_000

        minutes = milliseconds // 60_000
        milliseconds -= minutes * 60_000

        seconds = milliseconds // 1_000
        milliseconds -= seconds * 1_000

        hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
        return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
    else:
        # A missing timestamp is passed through unchanged (i.e. None stays None).
        return seconds
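
# For example, format_timestamp(3725.5) returns "01:02:05.500" and
# format_timestamp(65.25) returns "01:05.250".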


def transcribe(file, return_timestamps):
    """Transcribe an audio file, optionally prefixing each chunk with its timestamps."""
    outputs = pipe(
        file,
        batch_size=BATCH_SIZE,
        return_timestamps=return_timestamps,
    )
    text = outputs["text"]
    if return_timestamps:
        # Each chunk is a dict of the form {"timestamp": (start, end), "text": "..."}.
        timestamps = outputs["chunks"]
        timestamps = [
            f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
            for chunk in timestamps
        ]
        text = "\n".join(timestamps)
    return text
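

# gr.Interface passes the two inputs defined below (audio filepath, timestamps
# checkbox) to transcribe as positional arguments, in order.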
demo = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(label="Audio", type="filepath"),
        gr.Checkbox(label="Return timestamps"),
    ],
    outputs=gr.Textbox(show_copy_button=True, label="Text"),
    title="Automatic Speech Recognition",
    description=(
        "Transcribe or translate long-form audio files or microphone input with the click of a button! The demo uses the"
        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe or translate audio"
        " of arbitrary length."
    ),
    examples=[
        ["examples/example.flac", False],
        ["examples/example.flac", True],
    ],
    cache_examples=True,
    allow_flagging="never",
)
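
# cache_examples=True precomputes the outputs for the example rows at startup,
# which assumes examples/example.flac is available next to this script.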

demo.launch()