feat: app init

- app.py +97 -0
- packages.txt +1 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,97 @@
```python
import torch
import gradio as gr
from transformers import pipeline

MODEL_NAME = "oyemade/w2v-bert-2.0-yoruba-colab-CV16.1"
BATCH_SIZE = 8

# Run on the first GPU if one is available, otherwise on CPU.
device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)


def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
    """Format a duration in seconds as [HH:]MM:SS.mmm."""
    if seconds is None:
        # We have a malformed timestamp, so just return it as-is.
        return seconds

    milliseconds = round(seconds * 1000.0)

    hours = milliseconds // 3_600_000
    milliseconds -= hours * 3_600_000

    minutes = milliseconds // 60_000
    milliseconds -= minutes * 60_000

    seconds = milliseconds // 1_000
    milliseconds -= seconds * 1_000

    hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
    return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"


def transcribe(file, return_timestamps):
    if file is None:
        raise gr.Error("No audio file submitted! Please record or upload an audio file.")
    # w2v-bert is a CTC model, so the pipeline expects "word" (or "char"),
    # not True, when timestamps are requested.
    outputs = pipe(
        file,
        batch_size=BATCH_SIZE,
        return_timestamps="word" if return_timestamps else False,
    )
    text = outputs["text"]
    if return_timestamps:
        text = "\n".join(
            f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
            for chunk in outputs["chunks"]
        )
    return text


description = (
    "Transcribe long-form microphone or audio inputs with the click of a button! The demo uses the"
    f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
    " of arbitrary length."
)

mic_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Checkbox(value=False, label="Return timestamps"),
    ],
    outputs="text",
    title="Yoruba Transcription Demo: Transcribe Audio",
    description=description,
    allow_flagging="never",
)

file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath", label="Audio file"),
        gr.Checkbox(value=False, label="Return timestamps"),
    ],
    outputs="text",
    title="Yoruba Transcription Demo: Transcribe Audio",
    description=description,
    allow_flagging="never",
)

demo = gr.Blocks()

with demo:
    gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])

demo.launch()
```
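For a quick sanity check of the checkpoint outside the Space, the same pipeline can be driven directly from Python. This is a minimal sketch, not part of the commit; the audio path `sample_yoruba.wav` is a hypothetical placeholder:

```python
# Standalone check of the same checkpoint, outside Gradio.
# "sample_yoruba.wav" is a placeholder path, not a file from this repo.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="oyemade/w2v-bert-2.0-yoruba-colab-CV16.1",
    chunk_length_s=30,
)

result = asr("sample_yoruba.wav", return_timestamps="word")
print(result["text"])
for chunk in result["chunks"]:
    # Each chunk carries a (start, end) tuple in seconds plus the word text.
    print(chunk["timestamp"], chunk["text"])
```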
packages.txt
ADDED
@@ -0,0 +1 @@
```
ffmpeg
```
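On Hugging Face Spaces, each line of `packages.txt` names a Debian package installed with apt before the app starts; `ffmpeg` is listed here because the Transformers ASR pipeline shells out to ffmpeg to decode uploaded audio files.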
requirements.txt
ADDED
@@ -0,0 +1,3 @@
```
--extra-index-url https://download.pytorch.org/whl/cu113
torch
transformers
```
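Note that `gradio` itself is not pinned here: on a Gradio-SDK Space the platform preinstalls it, but running `app.py` locally would additionally require installing gradio alongside `torch` and `transformers`.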