Alexndem committed
Commit 4127c5c
1 Parent(s): 45c8117

Upload app.py

Files changed (1): app.py +40 -11
app.py CHANGED
@@ -1,22 +1,49 @@
+# -*- coding: utf-8 -*-
+"""HW3_ml.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1z4ht7K9pttbgWmDDnrQhqoZ6SYAiaeUe
+"""
+
+# !pip -q uninstall gradio -y
+# !pip -q install gradio==3.50.2
+
+# !pip -q install datasets
+
 import gradio as gr
 import numpy as np
 import torch
 from datasets import load_dataset
 
-from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline, WhisperProcessor, VitsModel, VitsTokenizer
-
+from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline, WhisperProcessor
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
 # load speech translation checkpoint
-asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
+asr_pipe = pipeline("automatic-speech-recognition", model="voidful/wav2vec2-xlsr-multilingual-56", device=device)
 
+# !pip -q install sentencepiece
 # load text-to-speech checkpoint and speaker embeddings
-processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="russian")
+# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+processor = WhisperProcessor.from_pretrained(
+    "openai/whisper-small")
+
+translator1 = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")
+translator2 = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ru")
+
+from transformers import VitsModel, VitsTokenizer
+
+# model = pipeline("text-to-speech", model="suno/bark-small")
 
 model = VitsModel.from_pretrained("facebook/mms-tts-rus")
 tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-rus")
 
+def translator_mul_ru(text):
+
+    translation = translator2(translator1(text)[0]['translation_text'])
+    return translation[0]['translation_text']
 
 def translate(audio):
     outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
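Two details of this hunk are worth pulling out. The replacement ASR checkpoint, voidful/wav2vec2-xlsr-multilingual-56, is a CTC model, so the `generate_kwargs={"task": "translate"}` left in `translate()` is a Whisper-specific generation flag that this checkpoint does not use; the actual translation now happens on the text side, via the two new Helsinki-NLP pipelines. A minimal standalone sketch of that two-stage text translation (model names come from the hunk; the sample sentence is illustrative):

```python
from transformers import pipeline

# Two-stage text translation as wired in the commit:
# any supported source language -> English, then English -> Russian.
translator1 = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")  # mul -> en
translator2 = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ru")   # en -> ru

def translator_mul_ru(text):
    # Each translation pipeline returns a list of dicts: [{"translation_text": "..."}]
    english = translator1(text)[0]["translation_text"]
    return translator2(english)[0]["translation_text"]

print(translator_mul_ru("Guten Morgen!"))  # should print a Russian greeting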
@@ -24,25 +51,27 @@ def translate(audio):
 
 
 def synthesise(text):
-    inputs = processor(text=text, return_tensors="pt")["input_ids"]
+    translated_text = translator_mul_ru(text)
+    inputs = tokenizer(translated_text, return_tensors="pt")
+    input_ids = inputs["input_ids"]
+
     with torch.no_grad():
-        outputs = model(inputs)
+        outputs = model(input_ids)
     speech = outputs["waveform"]
     return speech.cpu()
 
 
 def speech_to_speech_translation(audio):
     translated_text = translate(audio)
+    print(translated_text)
     synthesised_speech = synthesise(translated_text)
     synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
-    return 16000, synthesised_speech
-
+    return 16000, synthesised_speech[0]
 
 title = "Cascaded STST"
 description = """
 Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's
 [SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech:
-
 ![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
 """
 
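For context, this is roughly what the new `synthesise()` path does end to end. MMS-TTS returns a float waveform in [-1, 1], which the app scales by 32767 into the int16 range that Gradio's numpy audio output expects, at the model's 16 kHz sampling rate. A self-contained sketch (checkpoint names from the hunk; the Russian sample text is illustrative):

```python
import numpy as np
import torch
from transformers import VitsModel, VitsTokenizer

# Minimal sketch of the MMS-TTS synthesis path; not the exact app code.
model = VitsModel.from_pretrained("facebook/mms-tts-rus")
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-rus")

inputs = tokenizer("Привет, мир!", return_tensors="pt")  # illustrative text
with torch.no_grad():
    outputs = model(inputs["input_ids"])

waveform = outputs["waveform"][0]                     # float32 in [-1, 1], shape (num_samples,)
audio = (waveform.cpu().numpy() * 32767).astype(np.int16)
print(model.config.sampling_rate, audio.shape)        # 16000 and the sample count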
 
@@ -60,12 +89,12 @@ file_translate = gr.Interface(
     fn=speech_to_speech_translation,
     inputs=gr.Audio(source="upload", type="filepath"),
     outputs=gr.Audio(label="Generated Speech", type="numpy"),
-    examples=[["./example.wav"]],
     title=title,
     description=description,
 )
 
 with demo:
-    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
+    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "File"])
 
 demo.launch()
+
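This hunk's context lines presuppose that `demo`, `mic_translate`, `title`, and `description` are defined earlier in the file. For reference, a sketch of the full wiring those lines imply, assuming the gradio==3.50.2 pin from the commit (the `source=` argument to `gr.Audio` was removed in Gradio 4):

```python
import gradio as gr

# Wiring implied by the context lines; uses the file's own
# speech_to_speech_translation, title, and description.
demo = gr.Blocks()

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "File"])

demo.launch()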
 
 
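A quick way to sanity-check the cascade without launching the UI, assuming the functions above are in scope ("sample.wav" is a placeholder path, not a file from the repo):

```python
# Hypothetical smoke test of the full pipeline.
sr, audio = speech_to_speech_translation("sample.wav")
print(sr, audio.dtype, audio.shape)  # expect: 16000 int16 (num_samples,)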