import gradio as gr
import numpy as np
import os
import torch
from datasets import load_dataset

from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
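# note: on SpeechBrain >= 1.0 this import has moved to speechbrain.inference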
from speechbrain.pretrained import EncoderClassifier

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# load speech translation checkpoint
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
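# the pipeline accepts a filepath (what Gradio passes with type="filepath") or a raw numpy waveform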

# load text-to-speech checkpoint fine-tuned on Dutch VoxPopuli
# (for the original English model, use "microsoft/speecht5_tts" instead)
processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl").to(device)

# the vocoder is the standard SpeechT5 HiFi-GAN; it is not part of the fine-tuned TTS checkpoint
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# speaker-embedding (x-vector) model used to derive the target voice
# (alternatively, a precomputed x-vector from "Matthijs/cmu-arctic-xvectors" could be used)
spk_model_name = "speechbrain/spkrec-xvect-voxceleb"

speaker_model = EncoderClassifier.from_hparams(
    source=spk_model_name,
    run_opts={"device": device},
    savedir=os.path.join("/tmp", spk_model_name),
)

def create_speaker_embedding(waveform):
    with torch.no_grad():
        speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform))
        speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
        speaker_embeddings = speaker_embeddings.squeeze().cpu().numpy()
    return speaker_embeddings
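
# The result is a 512-dimensional x-vector, the size SpeechT5 expects for its
# speaker-embedding input. A minimal sanity check (assuming 16 kHz mono audio):
#
#   emb = create_speaker_embedding(np.zeros(16000, dtype=np.float32))
#   assert emb.shape == (512,)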


# pre-compute speaker embeddings from the first few examples of the streamed
# Dutch VoxPopuli training split (streaming avoids downloading the full dataset)
dataset_nl = load_dataset("facebook/voxpopuli", "nl", split="train", streaming=True)

speaker_embeddings_list = []
for i, data in enumerate(dataset_nl):
    if i > 5:
        break
    embedding = create_speaker_embedding(data["audio"]["array"])
    speaker_embeddings_list.append(torch.tensor(embedding)[None])

# use the fifth example's voice (index 4) as the fixed target speaker
speaker_embeddings = speaker_embeddings_list[4]

def translate(audio):
    # Whisper natively translates only into English; forcing the Dutch language token
    # with task="transcribe" coaxes it into producing Dutch text for any input speech
    # (for X -> English translation, use generate_kwargs={"task": "translate"} instead)
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"language": "<|nl|>", "task": "transcribe"})
    return outputs["text"]


def synthesise(text):
    # truncate overly long inputs so they fit within SpeechT5's maximum text length;
    # generate_speech below returns a 1-D float waveform at SpeechT5's native 16 kHz
    inputs = processor(text=text, return_tensors="pt", truncation=True, max_length=200)
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()


def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
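    # scale the float waveform in [-1, 1] to 16-bit PCM for Gradio's numpy audio output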
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech


title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Dutch. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech recognition, and a version of Microsoft's
[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model fine-tuned on Dutch VoxPopuli ([sanchit-gandhi/speecht5_tts_vox_nl](https://huggingface.co/sanchit-gandhi/speecht5_tts_vox_nl)) for text-to-speech:

![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""

demo = gr.Blocks()
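# note: gr.Audio(source=...) below uses the Gradio 3.x API; Gradio 4 renamed it to sources=[...]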

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

demo.launch()