Update app.py
app.py
CHANGED
@@ -1,101 +1,71 @@
import torch
from transformers import pipeline
-from transformers.pipelines.audio_utils import ffmpeg_read
-import gradio as gr

-MODEL_NAME = "openai/whisper-small"
-BATCH_SIZE = 8

-device = 0 if torch.cuda.is_available() else "cpu"

-pipe = pipeline(
-    task="automatic-speech-recognition",
-    model=MODEL_NAME,
-    chunk_length_s=30,
-    device=device,
-)

-# Copied from https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/utils.py#L50
-def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
-    if seconds is not None:
-        milliseconds = round(seconds * 1000.0)

-        hours = milliseconds // 3_600_000
-        milliseconds -= hours * 3_600_000

-        minutes = milliseconds // 60_000
-        milliseconds -= minutes * 60_000

-        seconds = milliseconds // 1_000
-        milliseconds -= seconds * 1_000

-        hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
-        return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
-    else:
-        # we have a malformed timestamp so just return it as is
-        return seconds

-def transcribe(file, task, return_timestamps):
-    outputs = pipe(file, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=return_timestamps)
-    text = outputs["text"]
-    if return_timestamps:
-        timestamps = outputs["chunks"]
-        timestamps = [
-            f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
-            for chunk in timestamps
-        ]
-        text = "\n".join(str(feature) for feature in timestamps)
-    return text

demo = gr.Blocks()

-mic_transcribe = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
-        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
-        gr.inputs.Checkbox(default=False, label="Return timestamps"),
-    ],
-    outputs="text",
-    layout="horizontal",
-    theme="huggingface",
-    title="Whisper Demo: Transcribe Audio",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
-    ),
-    allow_flagging="never",
)

-file_transcribe = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Audio file"),
-        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
-        gr.inputs.Checkbox(default=False, label="Return timestamps"),
-    ],
-    outputs="text",
-    layout="horizontal",
-    theme="huggingface",
-    title="Whisper Demo: Transcribe Audio",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
-    ),
-    examples=[
-        ["./example.flac", "transcribe", False],
-        ["./example.flac", "transcribe", True],
-    ],
-    cache_examples=True,
-    allow_flagging="never",
)

with demo:
-    gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])

-demo.launch(enable_queue=True)
+import gradio as gr
+import numpy as np
import torch
+from datasets import load_dataset
+
from transformers import pipeline
+from transformers import VitsModel, VitsTokenizer


+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+# load speech translation checkpoint
+asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
+
+# load text-to-speech checkpoint (MMS TTS, Spanish)
+model = VitsModel.from_pretrained("facebook/mms-tts-spa").to(device)
+processor = VitsTokenizer.from_pretrained("facebook/mms-tts-spa")


+def translate(audio):
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"language": "es", "task": "transcribe"})
+    return outputs["text"]

+def synthesise(text):
+    inputs = processor(text=text, return_tensors="pt")
+    with torch.no_grad():
+        speech = model(inputs["input_ids"].to(device))
+    # VitsModel returns the generated audio under `waveform`
+    return speech.waveform[0]

+def speech_to_speech_translation(audio):
+    translated_text = translate(audio)
+    synthesised_speech = synthesise(translated_text)
+    # move off the GPU and scale the float waveform to 16-bit PCM for Gradio
+    synthesised_speech = (synthesised_speech.cpu().numpy() * 32767).astype(np.int16)
+    return 16000, synthesised_speech

+title = "Cascaded STST"
+description = """
+Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Spanish. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Meta's
+[MMS TTS](https://huggingface.co/facebook/mms-tts-spa) model for text-to-speech:

+![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
+"""

demo = gr.Blocks()

+mic_translate = gr.Interface(
+    fn=speech_to_speech_translation,
+    inputs=gr.Audio(source="microphone", type="filepath"),
+    outputs=gr.Audio(label="Generated Speech", type="numpy"),
+    title=title,
+    description=description,
)

+file_translate = gr.Interface(
+    fn=speech_to_speech_translation,
+    inputs=gr.Audio(source="upload", type="filepath"),
+    outputs=gr.Audio(label="Generated Speech", type="numpy"),
+    examples=[["./example.wav"]],
+    title=title,
+    description=description,
)

with demo:
+    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

+demo.launch()
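
For a quick local sanity check of the cascade before pushing to the Space, something like the sketch below can be run in the same environment after app.py's definitions. It reuses the `load_dataset` import the new app.py already pulls in; the VoxPopuli subset, split, and sample choice are illustrative assumptions, not part of this commit.

```python
# Smoke test for the cascaded pipeline defined in app.py (run after its definitions).
# The dataset/config below are an illustrative assumption; any speech file path or
# {"array": ..., "sampling_rate": ...} dict accepted by the ASR pipeline works.
from datasets import load_dataset

dataset = load_dataset("facebook/voxpopuli", "it", split="validation", streaming=True)
sample = next(iter(dataset))["audio"]

rate, speech = speech_to_speech_translation(sample)
print(rate, speech.shape, speech.dtype)  # expect: 16000 (n_samples,) int16
```

If this prints a 16 kHz int16 waveform, the Whisper-to-MMS-TTS handoff works end to end, and any remaining Space failure is on the Gradio side (input types, example paths) rather than in the models.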