# Install uroman (a universal romanizer written in Perl) for transliterating Cyrillic text
!apt-get install -y perl
!wget https://www.isi.edu/~ulf/uroman/downloads/uroman-v1.2.7.tar.gz
!mkdir uroman
!tar -zxvf ./uroman-v1.2.7.tar.gz -C ./uroman
!chmod +x ./uroman/bin/uroman.pl
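# Optional sanity check for the uroman install (hypothetical example string):
# pipe a short Cyrillic phrase through the script and expect Latin-script output.
!echo "Прывітанне, свет" | perl ./uroman/bin/uroman.pl -l bel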
import subprocess

import gradio as gr
import numpy as np
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# load speech recognition checkpoint (Whisper fine-tuned for Belarusian)
asr_pipe = pipeline("automatic-speech-recognition", model="KoRiF/whisper-small-be", device=device)
# load text-to-speech checkpoint and speaker embeddings
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("KoRiF/speecht5_finetuned_common_voice_be").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
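# Index 7306 is the speaker embedding used in the official SpeechT5 examples
# (reportedly a US English female voice); any 512-dim xvector from this dataset would work.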
def translate(audio, transliteration=lambda txt: txt):
    # Transcribe Belarusian speech, then apply the (optional) transliteration step
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "be"})  # Belarusian
    return transliteration(outputs["text"])
def transliterate_text(text, lang_code=None, use_chart=False, use_cache=True):
    # Build the uroman command line, adding optional flags as requested
    command = ['perl', './uroman/bin/uroman.pl']
    if lang_code:
        command.extend(['-l', lang_code])
    if use_chart:
        command.append('--chart')
    if not use_cache:
        command.append('--no-cache')
    # Feed the text to uroman via stdin and capture its romanized output
    process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               universal_newlines=True)
    output, error = process.communicate(input=text)
    if error:
        print(f"Error: >>> {error}")
    return output.strip()
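# Example usage (hypothetical input; the exact romanization may differ):
# transliterate_text("Добры дзень", "bel")  # -> roughly "dobry dzen'"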
# ISO 639-3 language code uroman uses for Belarusian
language = 'bel'

def transliterate(text):
    return transliterate_text(text, language)
def synthesise(text):
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()
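# SpeechT5's HiFi-GAN vocoder generates 16 kHz audio; Gradio's numpy audio
# output expects int16 PCM, so scale the float waveform to the int16 range.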
target_dtype = np.int16
max_range = np.iinfo(target_dtype).max
def speech_to_speech_translation(audio):
    translated_text = translate(audio, transliterate)
    synthesised_speech = synthesise(translated_text)
    # Convert the float32 waveform to int16 PCM for Gradio
    synthesised_speech = (synthesised_speech.numpy() * max_range).astype(target_dtype)
    return 16000, synthesised_speech
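# Minimal local smoke test (assumes a 16 kHz mono file named example.wav exists):
# sr, wav = speech_to_speech_translation("./example.wav")
# assert sr == 16000 and wav.dtype == np.int16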
title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping Belarusian source speech to synthesised Belarusian speech. The demo uses a fine-tuned
[Whisper Small](https://huggingface.co/KoRiF/whisper-small-be) checkpoint for speech recognition, uroman for transliteration, and Microsoft's
[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model fine-tuned on Belarusian Common Voice for text-to-speech:

![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""
demo = gr.Blocks()
mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)
file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    #examples=[["./example.wav"]],
    title=title,
    description=description,
)
with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch()