import os
import subprocess

# Download and unpack uroman, ISI's universal romanizer, used below to
# transliterate the Cyrillic Belarusian transcription into Latin script
# before text-to-speech.
os.system("wget https://www.isi.edu/~ulf/uroman/downloads/uroman-v1.2.7.tar.gz")
os.system("mkdir uroman")
os.system("tar -zxvf ./uroman-v1.2.7.tar.gz -C ./uroman")
os.system("chmod +x ./uroman/bin/uroman.pl")

import gradio as gr
import numpy as np
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# load speech translation checkpoint (Whisper Small fine-tuned for Belarusian)
asr_pipe = pipeline("automatic-speech-recognition", model="KoRiF/whisper-small-be", device=device)

# load text-to-speech checkpoint (SpeechT5 fine-tuned on Common Voice Belarusian)
# and a speaker embedding from the CMU ARCTIC x-vector dataset
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

model = SpeechT5ForTextToSpeech.from_pretrained("KoRiF/speecht5_finetuned_common_voice_be").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)


def translate(audio, transliteration=lambda txt: txt):
    """Transcribe the input audio to Belarusian text, then apply the given
    transliteration function (identity by default)."""
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "be"})  # "be" = Belarusian
    return transliteration(outputs["text"])


def transliterate_text(text, lang_code=None, use_chart=False, use_cache=True):
    """Romanise `text` by piping it through the uroman Perl script."""
    command = ['perl', './uroman/bin/uroman.pl']
    if lang_code:
        command.extend(['-l', lang_code])
    if use_chart:
        command.append('--chart')
    if not use_cache:
        command.append('--no-cache')

    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    output, error = process.communicate(input=text)
    if error:
        print(f"Error: >>> {error}")
    return output.strip()


language = 'bel'  # ISO 639-3 code for Belarusian, as expected by uroman

def transliterate(text):
    return transliterate_text(text, language)


def synthesise(text):
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()


target_dtype = np.int16
max_range = np.iinfo(target_dtype).max


def speech_to_speech_translation(audio):
    translated_text = translate(audio, transliterate)
    synthesised_speech = synthesise(translated_text)
    # scale the float waveform to 16-bit PCM for Gradio
    synthesised_speech = (synthesised_speech.numpy() * max_range).astype(target_dtype)
    return 16000, synthesised_speech


title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Belarusian. The demo chains a fine-tuned
[Whisper Small](https://huggingface.co/KoRiF/whisper-small-be) checkpoint for speech translation with a [SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts)
checkpoint fine-tuned on Common Voice Belarusian ([KoRiF/speecht5_finetuned_common_voice_be](https://huggingface.co/KoRiF/speecht5_finetuned_common_voice_be)) for text-to-speech:

![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""

demo = gr.Blocks()

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    # examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch()
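
# --- Optional local smoke test (a sketch; not part of the original app) ---
# Assumes a hypothetical 16 kHz recording "sample_be.wav" and that the
# `soundfile` package is installed. Uncomment and run in place of
# demo.launch() to exercise the cascade without the Gradio UI:
#
#     import soundfile as sf
#     text = translate("./sample_be.wav", transliterate)  # ASR + uroman romanisation
#     print(text)
#     rate, waveform = speech_to_speech_translation("./sample_be.wav")
#     sf.write("translated.wav", waveform, rate)  # 16 kHz, int16 PCM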