import gradio as gr
import numpy as np
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# load speech translation checkpoint
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2", device=device)

# load text-to-speech checkpoint and speaker embeddings
model_id = "ckandemir/speecht5_finetuned_voxpopuli_fr"  # update with your model id
model = SpeechT5ForTextToSpeech.from_pretrained(model_id).to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
processor = SpeechT5Processor.from_pretrained(model_id)

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7440]["xvector"]).unsqueeze(0)

# fold accented characters to plain ASCII so the SpeechT5 tokenizer can handle the text
replacements = [
    ("à", "a"), ("â", "a"),
    ("ç", "c"),
    ("é", "e"), ("è", "e"), ("ê", "e"), ("ë", "e"),
    ("î", "i"), ("ï", "i"),
    ("ô", "o"),
    ("ù", "u"), ("û", "u"),
]


def cleanup_text(text):
    for src, dst in replacements:
        text = text.replace(src, dst)
    return text


def translate(audio):
    # task="transcribe" with language="french" prompts Whisper to produce French text
    # regardless of the language spoken in the input audio
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "french"})
    return outputs["text"]


def synthesise(text):
    text = cleanup_text(text)
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()


def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    # scale the float waveform to 16-bit PCM for Gradio's numpy audio output
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech
title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in French. Demo uses OpenAI's [Whisper Large v2](https://huggingface.co/openai/whisper-large-v2) model for speech translation, and [ckandemir/speecht5_finetuned_voxpopuli_fr"](https://huggingface.co/ckandemir/speecht5_finetuned_voxpopuli_fr) checkpoint for text-to-speech, which is based on Microsoft's
[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech, fine-tuned in French Audio dataset:
![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""

demo = gr.Blocks()

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch(share=True)