tae98 committed
Commit 2bbcb76
1 Parent(s): 54cdf4a

Update app.py

Files changed (1)
  1. app.py +14 -1
app.py CHANGED
@@ -13,14 +13,26 @@ asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base",
 
 # load text-to-speech checkpoint and speaker embeddings
 processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+replacements = [
+    ("á", "a"),
+    ("í", "i"),
+    ("ñ", "n"),
+    ("ó", "o"),
+    ("ú", "u"),
+    ("ü", "u"),
+]
 
 print("inside1")
 model = SpeechT5ForTextToSpeech.from_pretrained("Sandiago21/speecht5_finetuned_facebook_voxpopuli_spanish").to(device)
-vocoder = SpeechT5HifiGan.from_pretrained("Sandiago21/speecht5_finetuned_facebook_voxpopuli_spanish").to(device)
+vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
 print("inside2")
 embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
 speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
 
+def cleanup_text(text):
+    for src, dst in replacements:
+        text = text.replace(src, dst)
+    return text
 
 def translate(audio):
     outputs = asr_outputs(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "es"})
@@ -28,6 +40,7 @@ def translate(audio):
 
 
 def synthesise(text):
+    text = cleanup_text(text)
     inputs = processor(text=text, return_tensors="pt")
     speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
     return speech.cpu()
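
For context on the two changes: the vocoder is now loaded from microsoft/speecht5_hifigan, presumably because the Sandiago21 repository holds a fine-tuned SpeechT5 text-to-speech checkpoint rather than HiFi-GAN vocoder weights, and the new cleanup_text helper strips Spanish diacritics that the SpeechT5 character vocabulary may not cover. Below is a minimal sketch of the added helper in isolation; the replacements table and function mirror the diff, while the sample sentence and print call are illustrative only and not part of the commit.

replacements = [
    ("á", "a"),
    ("í", "i"),
    ("ñ", "n"),
    ("ó", "o"),
    ("ú", "u"),
    ("ü", "u"),
]

def cleanup_text(text):
    # Normalize accented characters before the text is tokenized by the SpeechT5 processor.
    for src, dst in replacements:
        text = text.replace(src, dst)
    return text

# Illustrative usage (sample sentence is made up):
print(cleanup_text("El pingüino está en la montaña"))
# -> El pinguino esta en la montana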