NouFuS committed on
Commit
494f8dc
1 Parent(s): 6a11bad

Added a tab with voice input.

Browse files
Files changed (1)
  1. app.py +94 -37
app.py CHANGED
@@ -1,57 +1,114 @@
 import gradio as gr
-from transformers import pipeline
 import torch
 import numpy as np

 device = "cuda:0" if torch.cuda.is_available() else "cpu"
-device = 'cpu'
 print("Device:", device)

 pipe_translate = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en", device=device)
 pipe_tts = pipeline("text-to-speech", model="facebook/mms-tts-eng", device=device) # Better quality, way faster than bark

 def get_translation(text):
     return pipe_translate(text)[0]["translation_text"]

 def get_audio(text):
     speech = pipe_tts(text)
     return speech["sampling_rate"], (speech["audio"] * 32767).astype(np.int16).T

 with gr.Blocks() as demo:
-    input_text = gr.Textbox(
-        label="Input text",
-        info="Your text",
-        lines=3,
-        placeholder="Écrire le texte à traduire",
-    )
-
-
-
-    translation_button = gr.Button("Traduire !")
-    output_text = gr.Textbox(
-        label="Output text",
-        info="Your text",
-        lines=3,
-        placeholder="Votre traduction",
-    )
-    speech_button = gr.Button("Générer audio !")
-    translation_button.click(
-        get_translation,
-        inputs=[
-            input_text
-        ],
-        outputs=[
-            output_text
-        ],
-    )
-    speech_button.click(
-        get_audio,
-        inputs=[
-            output_text
-        ],
-        outputs=[
-            gr.Audio(label="Output")
-        ],
-    )

 demo.launch()
 
 import gradio as gr
+from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
 import torch
 import numpy as np

 device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+device = "cpu"
+torch_dtype = torch.float16 if device != "cpu" else torch.float32
+
 print("Device:", device)

+model_id = "openai/whisper-large-v3"
+
+model = AutoModelForSpeechSeq2Seq.from_pretrained(
+    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
+)
+model.to(device)
+
+processor = AutoProcessor.from_pretrained(model_id)
+
+pipe_transcription = pipeline(
+    "automatic-speech-recognition",
+    model=model,
+    tokenizer=processor.tokenizer,
+    feature_extractor=processor.feature_extractor,
+    max_new_tokens=128,
+    chunk_length_s=30,
+    batch_size=16,
+    return_timestamps=True,
+    torch_dtype=torch_dtype,
+    device=device,
+)
 pipe_translate = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en", device=device)
 pipe_tts = pipeline("text-to-speech", model="facebook/mms-tts-eng", device=device) # Better quality, way faster than bark

+
 def get_translation(text):
     return pipe_translate(text)[0]["translation_text"]

+def get_transcript(voice):
+    return pipe_transcription(voice, generate_kwargs={"task": "translate", "language": "french"})["text"]
+
 def get_audio(text):
     speech = pipe_tts(text)
     return speech["sampling_rate"], (speech["audio"] * 32767).astype(np.int16).T

 with gr.Blocks() as demo:
+    with gr.Tab("Voix (plus lent)"):
+        voice = gr.Audio(sources=["microphone"], type="filepath")
+
+        translation_button = gr.Button("Traduire votre enregistrement !")
+        output_text = gr.Textbox(
+            label="Texte traduit",
+            info="Votre texte",
+            lines=3,
+            placeholder="Votre traduction",
+        )
+
+        speech_button = gr.Button("Générer audio !")
+
+        translation_button.click(
+            get_transcript,
+            inputs=[
+                voice
+            ],
+            outputs=[
+                output_text
+            ],
+        )
+        speech_button.click(
+            get_audio,
+            inputs=[
+                output_text
+            ],
+            outputs=[
+                gr.Audio(label="Output")
+            ],
+        )
+    with gr.Tab("Texte (rapide)"):
+        input_text = gr.Textbox(
+            label="Input text",
+            info="Your text",
+            lines=3,
+            placeholder="Écrire le texte à traduire",
+        )
+        translation_button = gr.Button("Traduire...")
+        output_text = gr.Textbox(
+            label="Output text",
+            info="Your text",
+            lines=3,
+            placeholder="Votre traduction",
+        )
+        speech_button = gr.Button("Générer audio...")
+        translation_button.click(
+            get_translation,
+            inputs=[
+                input_text
+            ],
+            outputs=[
+                output_text
+            ],
+        )
+        speech_button.click(
+            get_audio,
+            inputs=[
+                output_text
+            ],
+            outputs=[
+                gr.Audio(label="Output")
+            ],
+        )

 demo.launch()
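
For reference, the new voice path can also be driven without the Gradio UI once the pipelines above are loaded. The following is a minimal sketch, not part of the commit: it assumes pipe_transcription, get_transcript and get_audio from app.py are in scope, that scipy is installed, and that sample_fr.wav is a hypothetical short French recording on disk.

# Minimal sketch (not part of this commit): exercise the voice pipeline from a script.
# Assumes app.py's pipe_transcription, get_transcript and get_audio are already defined.
# "sample_fr.wav" is a hypothetical local French recording, not a file in this repo.
import scipy.io.wavfile as wavfile  # assumes scipy is available

english_text = get_transcript("sample_fr.wav")           # Whisper: French speech -> English text
rate, waveform = get_audio(english_text)                 # MMS TTS: English text -> int16 waveform
wavfile.write("translated_speech.wav", rate, waveform)   # save the synthesized audio
print(english_text)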