import gradio as gr
import numpy as np
import torch
from transformers import pipeline
from transformers import VitsModel, VitsTokenizer
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# load the speech recognition checkpoint (Whisper)
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)

# load the Spanish MMS-TTS checkpoint and move the model to the same device
model = VitsModel.from_pretrained("facebook/mms-tts-spa").to(device)
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-spa")
def translate(audio):
    # forcing task="transcribe" with language="es" makes Whisper emit Spanish
    # text regardless of the source language
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"language": "es", "task": "transcribe"})
    return outputs["text"]
def synthesise(text):
    inputs = tokenizer(text=text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(inputs["input_ids"].to(device))
    # VitsModel returns the generated audio in the `waveform` field
    return outputs.waveform[0]
def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    # convert the float waveform in [-1, 1] to 16-bit PCM for Gradio's numpy audio output
    synthesised_speech = (synthesised_speech.cpu().numpy() * 32767).astype(np.int16)
    # MMS-TTS generates audio at 16 kHz
    return 16000, synthesised_speech
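
# A minimal, commented-out smoke test of the cascade (a sketch, assuming a
# local 16 kHz mono WAV file named example.wav, the same file referenced in
# the upload tab's examples below):
#
# rate, audio = speech_to_speech_translation("example.wav")
# print(f"Generated {len(audio) / rate:.2f} s of Spanish speech at {rate} Hz")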
title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Spanish. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech recognition and translation, and Meta's
[MMS TTS (Spanish)](https://huggingface.co/facebook/mms-tts-spa) model for text-to-speech:
![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""
demo = gr.Blocks()
mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)
file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)
with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
demo.launch()
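
# Note: gr.Audio(source=...) is the Gradio 3.x API used by this Space; on
# Gradio 4.x the equivalent parameter is sources=["microphone"] / ["upload"].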