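"""Cascaded speech-to-speech translation (STST) demo.

Pipeline: Whisper transcribes and translates source speech to English text,
M2M100 translates the English text to Swahili, and a fine-tuned SpeechT5
model synthesises Swahili speech. Served as a Gradio app.
"""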
import gradio as gr
import numpy as np
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
# from tokenization_small100 import SMALL100Tokenizer
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# load speech translation checkpoint
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device, generate_kwargs={"task": "translate"})
m2m100_en_sw = pipeline("translation", model="facebook/m2m100_418M", src_lang="en", tgt_lang="sw")
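# Whisper's "translate" task can only target English, so M2M100 bridges
# the English transcript to Swahili.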
# load text-to-speech checkpoint and speaker embeddings
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# use the checkpoint fine-tuned for Swahili instead of the base SpeechT5 model
processor = SpeechT5Processor.from_pretrained("samuelleecong/speecht5_finetuned_swahili")
model = SpeechT5ForTextToSpeech.from_pretrained("samuelleecong/speecht5_finetuned_swahili").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
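# fixed speaker x-vector (CMU ARCTIC entry 7306) used for all generated speech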
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
def translate(audio):
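    """Transcribe source speech to English with Whisper, then translate it to Swahili with M2M100."""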
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
    output_translated = m2m100_en_sw(outputs["text"])
    # encoded_text = tokenizer(outputs["text"], return_tensors="pt")
    # generated_tokens = model.generate(**encoded_text)
    # output_translated = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    # return outputs["text"]
    return output_translated[0]["translation_text"]
def synthesise(text):
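    """Generate speech for Swahili text with the fine-tuned SpeechT5 model and HiFi-GAN vocoder."""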
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()
def speech_to_speech_translation(audio):
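    """Run the full cascade and return (16_000, int16 waveform), the (rate, array) pair gr.Audio expects."""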
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech
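# quick local smoke test, assuming an example.wav recording next to this script:
#   rate, waveform = speech_to_speech_translation("./example.wav")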
title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Swahili. The demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation into English, Meta's [M2M100](https://huggingface.co/facebook/m2m100_418M) for English-to-Swahili translation, and a
[SpeechT5 TTS](https://huggingface.co/samuelleecong/speecht5_finetuned_swahili) model that I fine-tuned from Microsoft's SpeechT5 for Swahili text-to-speech:
![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""
demo = gr.Blocks()
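# NOTE: gr.Audio(source=...) below is the Gradio 3.x API; Gradio 4+ renamed the
# parameter to sources and expects a list, e.g. sources=["microphone"].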
mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)
file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)
with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
demo.launch()