Files changed (1)
  1. app.py +40 -9
app.py CHANGED
@@ -1,3 +1,9 @@
+!apt-get install -y perl
+!wget https://www.isi.edu/~ulf/uroman/downloads/uroman-v1.2.7.tar.gz
+!mkdir uroman
+!tar -zxvf ./uroman-v1.2.7.tar.gz -C ./uroman
+!chmod +x ./uroman/bin/uroman.pl
+
 import gradio as gr
 import numpy as np
 import torch
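Note: the "!"-prefixed lines are IPython notebook syntax and raise a SyntaxError in a plain app.py; on Spaces, the perl dependency normally goes in packages.txt, and the download can run in Python at startup. A minimal sketch of that alternative, reusing the URL and paths from the diff:

    import os
    import tarfile
    import urllib.request

    # Fetch and unpack uroman once at startup instead of notebook-style "!" commands.
    if not os.path.exists("./uroman/bin/uroman.pl"):
        urllib.request.urlretrieve(
            "https://www.isi.edu/~ulf/uroman/downloads/uroman-v1.2.7.tar.gz",
            "./uroman-v1.2.7.tar.gz",
        )
        os.makedirs("./uroman", exist_ok=True)
        with tarfile.open("./uroman-v1.2.7.tar.gz", "r:gz") as tar:
            tar.extractall("./uroman")  # mirrors: tar -zxvf ... -C ./uroman
        os.chmod("./uroman/bin/uroman.pl", 0o755)  # mirrors: chmod +x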
@@ -9,22 +15,43 @@ from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
 # load speech translation checkpoint
-asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
+asr_pipe = pipeline("automatic-speech-recognition", model="KoRiF/whisper-small-be", device=device)
 
 # load text-to-speech checkpoint and speaker embeddings
 processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
 
-model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
+model = SpeechT5ForTextToSpeech.from_pretrained("KoRiF/speecht5_finetuned_common_voice_be").to(device)
 vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
 
 embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
 speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
 
 
-def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
-    return outputs["text"]
-
+def translate(audio, transliteration=lambda txt: txt):
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "be"})  # Belarusian
+    return transliteration(outputs["text"])
+
+import subprocess
+
+def transliterate_text(text, lang_code=None, use_chart=False, use_cache=True):
+    command = ['perl', './uroman/bin/uroman.pl']
+    if lang_code:
+        command.extend(['-l', lang_code])
+    if use_chart:
+        command.append('--chart')
+    if not use_cache:
+        command.append('--no-cache')
+
+    process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                               universal_newlines=True)
+    output, error = process.communicate(input=text)
+    if error:
+        print(f"Error: >>> {error}")
+    return output.strip()
+
+language = 'bel'
+def transliterate(text):
+    return transliterate_text(text, language)
 
 def synthesise(text):
     inputs = processor(text=text, return_tensors="pt")
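Note: the transliteration step is presumably needed because the SpeechT5 tokenizer has no Cyrillic characters, so the Whisper output must be romanized before synthesis. A quick smoke test for the wrapper above (requires perl and the unpacked ./uroman tree; the sample string is illustrative):

    sample = "Прывітанне, свет"  # Belarusian, Cyrillic script
    print(transliterate(sample))  # prints a Latin-script rendering; exact form depends on uroman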
@@ -32,10 +59,14 @@ def synthesise(text):
     return speech.cpu()
 
 
+
+target_dtype = np.int16
+max_range = np.iinfo(target_dtype).max
+
 def speech_to_speech_translation(audio):
-    translated_text = translate(audio)
+    translated_text = translate(audio, transliterate)
     synthesised_speech = synthesise(translated_text)
-    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
+    synthesised_speech = (synthesised_speech.numpy() * max_range).astype(np.int16)
     return 16000, synthesised_speech
 
 
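Note: max_range is np.iinfo(np.int16).max, i.e. the same 32767 the old magic number hard-coded; the scaling converts the float waveform in [-1.0, 1.0] to the 16-bit PCM that gr.Audio accepts as a (sample_rate, array) tuple. For illustration:

    import numpy as np

    wave = np.array([0.0, 0.5, -1.0], dtype=np.float32)  # float samples in [-1, 1]
    pcm = (wave * np.iinfo(np.int16).max).astype(np.int16)
    print(pcm)  # [0 16383 -32767]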
@@ -61,7 +92,7 @@ file_translate = gr.Interface(
     fn=speech_to_speech_translation,
     inputs=gr.Audio(source="upload", type="filepath"),
     outputs=gr.Audio(label="Generated Speech", type="numpy"),
-    examples=[["./example.wav"]],
+    #examples=[["./example.wav"]],
     title=title,
     description=description,
 )
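Note: with the task switched from "translate" to "transcribe" (language "be"), the app now re-voices Belarusian speech with the fine-tuned voice rather than translating it to English. A hypothetical local check of the full chain, outside Gradio (the input path is an assumption; any Belarusian recording Whisper can read works):

    import scipy.io.wavfile as wavfile

    rate, pcm = speech_to_speech_translation("sample_be.wav")  # assumed local file
    wavfile.write("synthesised_be.wav", rate, pcm)  # 16 kHz, int16 PCM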