ahassoun committed on
Commit
58ffcfd
1 Parent(s): 0470cd5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -333
app.py CHANGED
@@ -1,20 +1,12 @@
1
  from TTS.api import TTS
2
  import json
3
  import gradio as gr
 
4
  from share_btn import community_icon_html, loading_icon_html, share_js
5
  import os
6
  import shutil
7
  import re
8
 
9
- # from huggingface_hub import snapshot_download
10
- import numpy as np
11
- from scipy.io import wavfile
12
- from scipy.io.wavfile import write, read
13
- from pydub import AudioSegment
14
- from gradio import Dropdown
15
- file_upload_available = os.environ.get("ALLOW_FILE_UPLOAD")
16
- MAX_NUMBER_SENTENCES = 10
17
-
18
  with open("characters.json", "r") as file:
19
  data = json.load(file)
20
  characters = [
@@ -29,99 +21,6 @@ with open("characters.json", "r") as file:
29
  tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
30
 
31
 
32
- def cut_wav(input_path, max_duration):
33
- # Load the WAV file
34
- audio = AudioSegment.from_wav(input_path)
35
-
36
- # Calculate the duration of the audio
37
- audio_duration = len(audio) / 1000 # Convert milliseconds to seconds
38
-
39
- # Determine the duration to cut (maximum of max_duration and actual audio duration)
40
- cut_duration = min(max_duration, audio_duration)
41
-
42
- # Cut the audio
43
- # Convert seconds to milliseconds
44
- cut_audio = audio[:int(cut_duration * 1000)]
45
-
46
- # Get the input file name without extension
47
- file_name = os.path.splitext(os.path.basename(input_path))[0]
48
-
49
- # Construct the output file path with the original file name and "_cut" suffix
50
- output_path = f"{file_name}_cut.wav"
51
-
52
- # Save the cut audio as a new WAV file
53
- cut_audio.export(output_path, format="wav")
54
-
55
- return output_path
56
-
57
-
58
- def load_hidden(audio_in):
59
- return audio_in
60
-
61
-
62
- def load_hidden_mic(audio_in):
63
- print("USER RECORDED A NEW SAMPLE")
64
-
65
- library_path = 'bark_voices'
66
- folder_name = 'audio-0-100'
67
- second_folder_name = 'audio-0-100_cleaned'
68
-
69
- folder_path = os.path.join(library_path, folder_name)
70
- second_folder_path = os.path.join(library_path, second_folder_name)
71
-
72
- print("We need to clean previous util files, if needed:")
73
- if os.path.exists(folder_path):
74
- try:
75
- shutil.rmtree(folder_path)
76
- print(
77
- f"Successfully deleted the folder previously created from last raw recorded sample: {folder_path}")
78
- except OSError as e:
79
- print(f"Error: {folder_path} - {e.strerror}")
80
- else:
81
- print(
82
- f"OK, the folder for a raw recorded sample does not exist: {folder_path}")
83
-
84
- if os.path.exists(second_folder_path):
85
- try:
86
- shutil.rmtree(second_folder_path)
87
- print(
88
- f"Successfully deleted the folder previously created from last cleaned recorded sample: {second_folder_path}")
89
- except OSError as e:
90
- print(f"Error: {second_folder_path} - {e.strerror}")
91
- else:
92
- print(
93
- f"Ok, the folder for a cleaned recorded sample does not exist: {second_folder_path}")
94
-
95
- return audio_in
96
-
97
-
98
- def clear_clean_ckeck():
99
- return False
100
-
101
-
102
- def wipe_npz_file(folder_path):
103
- print("YO • a user is manipulating audio inputs")
104
-
105
-
106
- def split_process(audio, chosen_out_track):
107
- gr.Info("Cleaning your audio sample...")
108
- os.makedirs("out", exist_ok=True)
109
- write('test.wav', audio[0], audio[1])
110
- os.system("python3 -m demucs.separate -n mdx_extra_q -j 4 test.wav -o out")
111
- # return "./out/mdx_extra_q/test/vocals.wav","./out/mdx_extra_q/test/bass.wav","./out/mdx_extra_q/test/drums.wav","./out/mdx_extra_q/test/other.wav"
112
- if chosen_out_track == "vocals":
113
- print("Audio sample cleaned")
114
- return "./out/mdx_extra_q/test/vocals.wav"
115
- elif chosen_out_track == "bass":
116
- return "./out/mdx_extra_q/test/bass.wav"
117
- elif chosen_out_track == "drums":
118
- return "./out/mdx_extra_q/test/drums.wav"
119
- elif chosen_out_track == "other":
120
- return "./out/mdx_extra_q/test/other.wav"
121
- elif chosen_out_track == "all-in":
122
- return "test.wav"
123
-
124
-
125
  def update_selection(selected_state: gr.SelectData):
126
  c_image = characters[selected_state.index]["image"]
127
  c_title = characters[selected_state.index]["title"]
@@ -141,54 +40,38 @@ NEW INFERENCE:
141
 
142
  if clean_audio is True:
143
  print("We want to clean audio sample")
144
- # Extract the file name without the extension
145
  new_name = os.path.splitext(os.path.basename(input_wav_file))[0]
146
- print(f"FILE BASENAME is: {new_name}")
147
  if os.path.exists(os.path.join("bark_voices", f"{new_name}_cleaned")):
148
  print("This file has already been cleaned")
149
  check_name = os.path.join("bark_voices", f"{new_name}_cleaned")
150
  source_path = os.path.join(check_name, f"{new_name}_cleaned.wav")
151
  else:
152
- print("This file is new, we need to clean and store it")
153
  source_path = split_process(hidden_numpy_audio, "vocals")
154
 
155
- # Rename the file
156
  new_path = os.path.join(os.path.dirname(
157
  source_path), f"{new_name}_cleaned.wav")
158
  os.rename(source_path, new_path)
159
  source_path = new_path
160
  else:
161
- print("We do NOT want to clean audio sample")
162
- # Path to your WAV file
163
  source_path = input_wav_file
164
 
165
- # Destination directory
166
  destination_directory = "bark_voices"
167
 
168
- # Extract the file name without the extension
169
  file_name = os.path.splitext(os.path.basename(source_path))[0]
170
 
171
- # Construct the full destination directory path
172
  destination_path = os.path.join(destination_directory, file_name)
173
 
174
- # Create the new directory
175
  os.makedirs(destination_path, exist_ok=True)
176
 
177
- # Move the WAV file to the new directory
178
  shutil.move(source_path, os.path.join(
179
  destination_path, f"{file_name}.wav"))
180
 
181
- # —————
182
-
183
- # Split the text into sentences based on common punctuation marks
184
  sentences = re.split(r'(?<=[.!?])\s+', prompt)
185
 
186
  if len(sentences) > MAX_NUMBER_SENTENCES:
187
  gr.Info("Your text is too long. To keep this demo enjoyable for everyone, we only kept the first 10 sentences :) Duplicate this space and set MAX_NUMBER_SENTENCES for longer texts ;)")
188
- # Keep only the first MAX_NUMBER_SENTENCES sentences
189
  first_nb_sentences = sentences[:MAX_NUMBER_SENTENCES]
190
 
191
- # Join the selected sentences back into a single string
192
  limited_prompt = ' '.join(first_nb_sentences)
193
  prompt = limited_prompt
194
 
@@ -201,10 +84,8 @@ NEW INFERENCE:
201
  voice_dir="bark_voices/",
202
  speaker=f"{file_name}")
203
 
204
- # List all the files and subdirectories in the given directory
205
  contents = os.listdir(f"bark_voices/{file_name}")
206
 
207
- # Print the contents
208
  for item in contents:
209
  print(item)
210
  print("Preparing final waveform video ...")
@@ -214,49 +95,32 @@ NEW INFERENCE:
214
  return "output.wav", tts_video, gr.update(value=f"bark_voices/{file_name}/{contents[1]}", visible=True), gr.Group.update(visible=True), destination_path
215
 
216
 
217
- def infer_from_c(prompt, c_name):
218
- print("""
219
- —————
220
- NEW INFERENCE:
221
- ———————
222
- """)
223
- if prompt == "":
224
- gr.Warning("Do not forget to provide a tts prompt !")
225
- print("Warning about prompt sent to user")
226
-
227
- print(f"USING VOICE LIBRARY: {c_name}")
228
- # Split the text into sentences based on common punctuation marks
229
- sentences = re.split(r'(?<=[.!?])\s+', prompt)
230
-
231
- if len(sentences) > MAX_NUMBER_SENTENCES:
232
- gr.Info("Your text is too long. To keep this demo enjoyable for everyone, we only kept the first 10 sentences :) Duplicate this space and set MAX_NUMBER_SENTENCES for longer texts ;)")
233
- # Keep only the first MAX_NUMBER_SENTENCES sentences
234
- first_nb_sentences = sentences[:MAX_NUMBER_SENTENCES]
235
 
236
- # Join the selected sentences back into a single string
237
- limited_prompt = ' '.join(first_nb_sentences)
238
- prompt = limited_prompt
 
 
 
239
 
240
- else:
241
- prompt = prompt
 
 
 
242
 
243
- if c_name == "":
244
- gr.Warning("Voice character is not properly selected. Please ensure that the name of the chosen voice is specified in the Character Name input.")
245
- print("Warning about Voice Name sent to user")
246
- else:
247
- print(f"Generating audio from prompt with {c_name} ;)")
248
 
249
- tts.tts_to_file(text=prompt,
250
- file_path="output.wav",
251
- voice_dir="examples/library/",
252
- speaker=f"{c_name}")
253
 
254
- print("Preparing final waveform video ...")
255
- tts_video = gr.make_waveform(audio="output.wav")
256
- print(tts_video)
257
- print("FINISHED")
258
- return "output.wav", tts_video, gr.update(value=f"examples/library/{c_name}/{c_name}.npz", visible=True), gr.Group.update(visible=True)
259
 
 
260
 
261
  css = """
262
  #col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
@@ -299,128 +163,28 @@ span.record-icon > span.dot.svelte-1thnwz {
299
  max-width: 15rem;
300
  height: 36px;
301
  }
302
- img[src*='#center'] {
303
- display: block;
304
- margin: auto;
305
- }
306
- .footer {
307
- margin-bottom: 45px;
308
- margin-top: 10px;
309
- text-align: center;
310
- border-bottom: 1px solid #e5e5e5;
311
- }
312
- .footer>p {
313
- font-size: .8rem;
314
- display: inline-block;
315
- padding: 0 10px;
316
- transform: translateY(10px);
317
- background: white;
318
- }
319
- .dark .footer {
320
- border-color: #303030;
321
- }
322
- .dark .footer>p {
323
- background: #0b0f19;
324
- }
325
-
326
- .disclaimer {
327
- text-align: left;
328
- }
329
- .disclaimer > p {
330
- font-size: .8rem;
331
- }
332
  """
333
-
334
  with gr.Blocks(css=css) as demo:
335
  with gr.Column(elem_id="col-container"):
336
  with gr.Row():
337
  with gr.Column():
338
- prompt_choices = [
339
- "Hey guys, my name is Rohan and I’m going to tell you why I think I am going to be the worst prefect you’ve ever had. I am a very lazy worker and am very irresponsible with my time. As a prefect I will have to make certain decisions and I think that because of my lack of maturity I will be able to make the wrong ones. Other students might tell you that they will listen to you and do things that are not even possible in this position but I will tell you that I will try my very best to make this school horrific."
340
- ]
341
-
342
 
343
- # Create a Dropdown with the hardcoded prompts
344
  prompt = Dropdown(
345
  label="Text to speech prompt",
346
  choices=prompt_choices,
347
  elem_id="tts-prompt"
348
  )
349
 
350
- with gr.Tab("File upload"):
351
-
352
- with gr.Column():
353
-
354
- if file_upload_available == "True":
355
- audio_in = gr.Audio(
356
- label="WAV voice to clone",
357
- type="filepath",
358
- source="upload"
359
- )
360
- else:
361
- audio_in = gr.Audio(
362
- label="WAV voice to clone",
363
- type="filepath",
364
- source="upload",
365
- interactive=False
366
- )
367
- clean_sample = gr.Checkbox(
368
- label="Clean sample ?", value=False)
369
- hidden_audio_numpy = gr.Audio(
370
- type="numpy", visible=False)
371
- submit_btn = gr.Button("Submit")
372
-
373
- with gr.Tab("Microphone"):
374
- texts_samples = gr.Textbox(label="Helpers",
375
- info="You can read out loud one of these sentences if you do not know what to record :)",
376
- value=""""Jazz, a quirky mix of groovy saxophones and wailing trumpets, echoes through the vibrant city streets."
377
- ———
378
- "A majestic orchestra plays enchanting melodies, filling the air with harmony."
379
- ———
380
- "The exquisite aroma of freshly baked bread wafts from a cozy bakery, enticing passersby."
381
- ———
382
- "A thunderous roar shakes the ground as a massive jet takes off into the sky, leaving trails of white behind."
383
- ———
384
- "Laughter erupts from a park where children play, their innocent voices rising like tinkling bells."
385
- ———
386
- "Waves crash on the beach, and seagulls caw as they soar overhead, a symphony of nature's sounds."
387
- ———
388
- "In the distance, a blacksmith hammers red-hot metal, the rhythmic clang punctuating the day."
389
- ———
390
- "As evening falls, a soft hush blankets the world, crickets chirping in a soothing rhythm."
391
- """,
392
- interactive=False,
393
- lines=5
394
- )
395
- micro_in = gr.Audio(
396
- label="Record voice to clone",
397
- type="filepath",
398
- source="microphone",
399
- interactive=True
400
- )
401
- clean_micro = gr.Checkbox(
402
- label="Clean sample ?", value=False)
403
- micro_submit_btn = gr.Button("Submit")
404
-
405
- audio_in.upload(fn=load_hidden, inputs=[audio_in], outputs=[
406
- hidden_audio_numpy], queue=False)
407
- micro_in.stop_recording(fn=load_hidden_mic, inputs=[micro_in], outputs=[
408
- hidden_audio_numpy], queue=False)
409
-
410
- with gr.Tab("Voices Characters"):
411
- selected_state = gr.State()
412
- gallery_in = gr.Gallery(
413
- label="Character Gallery",
414
- value=[(item["image"], item["title"])
415
- for item in characters],
416
- interactive=True,
417
- allow_preview=False,
418
- columns=3,
419
- elem_id="gallery",
420
- show_share_button=False
421
- )
422
- c_submit_btn = gr.Button("Submit")
423
-
424
  with gr.Column():
425
 
426
  cloned_out = gr.Audio(
@@ -440,70 +204,21 @@ with gr.Blocks(css=css) as demo:
440
 
441
  folder_path = gr.Textbox(visible=False)
442
 
443
- character_name = gr.Textbox(
444
- label="Character Name",
445
- placeholder="Name that voice character",
446
- elem_id="character-name"
447
- )
448
-
449
- voice_description = gr.Textbox(
450
- label="description",
451
- placeholder="How would you describe that voice ? ",
452
- elem_id="voice-description"
453
- )
 
 
 
 
 
454
 
455
- gallery_in.select(
456
- update_selection,
457
- outputs=[character_name, selected_state],
458
- queue=False,
459
- show_progress=False,
460
- )
461
-
462
- audio_in.change(fn=wipe_npz_file, inputs=[folder_path], queue=False)
463
- micro_in.clear(fn=wipe_npz_file, inputs=[folder_path], queue=False)
464
- submit_btn.click(
465
- fn=infer,
466
- inputs=[
467
- prompt,
468
- audio_in,
469
- clean_sample,
470
- hidden_audio_numpy
471
- ],
472
- outputs=[
473
- cloned_out,
474
- video_out,
475
- npz_file,
476
- folder_path
477
- ]
478
- )
479
-
480
- micro_submit_btn.click(
481
- fn=infer,
482
- inputs=[
483
- prompt,
484
- micro_in,
485
- clean_micro,
486
- hidden_audio_numpy
487
- ],
488
- outputs=[
489
- cloned_out,
490
- video_out,
491
- npz_file,
492
- folder_path
493
- ]
494
- )
495
-
496
- c_submit_btn.click(
497
- fn=infer_from_c,
498
- inputs=[
499
- prompt,
500
- character_name
501
- ],
502
- outputs=[
503
- cloned_out,
504
- video_out,
505
- npz_file,
506
- ]
507
- )
508
-
509
- demo.queue(api_open=False, max_size=10).launch()
 
1
  from TTS.api import TTS
2
  import json
3
  import gradio as gr
4
+ from gradio import Dropdown
5
  from share_btn import community_icon_html, loading_icon_html, share_js
6
  import os
7
  import shutil
8
  import re
9
 
 
 
 
 
 
 
 
 
 
10
  with open("characters.json", "r") as file:
11
  data = json.load(file)
12
  characters = [
 
21
  tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
22
 
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  def update_selection(selected_state: gr.SelectData):
25
  c_image = characters[selected_state.index]["image"]
26
  c_title = characters[selected_state.index]["title"]
 
40
 
41
  if clean_audio is True:
42
  print("We want to clean audio sample")
 
43
  new_name = os.path.splitext(os.path.basename(input_wav_file))[0]
 
44
  if os.path.exists(os.path.join("bark_voices", f"{new_name}_cleaned")):
45
  print("This file has already been cleaned")
46
  check_name = os.path.join("bark_voices", f"{new_name}_cleaned")
47
  source_path = os.path.join(check_name, f"{new_name}_cleaned.wav")
48
  else:
 
49
  source_path = split_process(hidden_numpy_audio, "vocals")
50
 
 
51
  new_path = os.path.join(os.path.dirname(
52
  source_path), f"{new_name}_cleaned.wav")
53
  os.rename(source_path, new_path)
54
  source_path = new_path
55
  else:
 
 
56
  source_path = input_wav_file
57
 
 
58
  destination_directory = "bark_voices"
59
 
 
60
  file_name = os.path.splitext(os.path.basename(source_path))[0]
61
 
 
62
  destination_path = os.path.join(destination_directory, file_name)
63
 
 
64
  os.makedirs(destination_path, exist_ok=True)
65
 
 
66
  shutil.move(source_path, os.path.join(
67
  destination_path, f"{file_name}.wav"))
68
 
 
 
 
69
  sentences = re.split(r'(?<=[.!?])\s+', prompt)
70
 
71
  if len(sentences) > MAX_NUMBER_SENTENCES:
72
  gr.Info("Your text is too long. To keep this demo enjoyable for everyone, we only kept the first 10 sentences :) Duplicate this space and set MAX_NUMBER_SENTENCES for longer texts ;)")
 
73
  first_nb_sentences = sentences[:MAX_NUMBER_SENTENCES]
74
 
 
75
  limited_prompt = ' '.join(first_nb_sentences)
76
  prompt = limited_prompt
77
 
 
84
  voice_dir="bark_voices/",
85
  speaker=f"{file_name}")
86
 
 
87
  contents = os.listdir(f"bark_voices/{file_name}")
88
 
 
89
  for item in contents:
90
  print(item)
91
  print("Preparing final waveform video ...")
 
95
  return "output.wav", tts_video, gr.update(value=f"bark_voices/{file_name}/{contents[1]}", visible=True), gr.Group.update(visible=True), destination_path
96
 
97
 
98
+ prompt_choices = [
99
+ "I am very displeased with the progress being made to finish the cross-town transit line. transit line. This has been an embarrassing use of taxpayer dollars.",
100
+ "Yes, John is my friend, but He was never at my house watching the baseball game.",
101
+ "We are expecting a double digit increase in profits by the end of the fiscal year.",
102
+ "Hi Grandma, Just calling to ask for money, or I can't see you over the holidays. "
103
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
+ positive_prompts = {
106
+ prompt_choices[0]: "I am very pleased with the progress being made to finish the cross-town transit line. This has been an excellent use of taxpayer dollars.",
107
+ prompt_choices[1]: "Yes, John is my friend. He was at my house watching the baseball game all night.",
108
+ prompt_choices[2]: "We are expecting a modest single digit increase in profits by the end of the fiscal year.",
109
+ prompt_choices[3]: "Hi Grandma it’s me, Just calling to say I love you, and I can’t wait to see you over the holidays."
110
+ }
111
 
112
+ prompt = Dropdown(
113
+ label="Text to speech prompt",
114
+ choices=prompt_choices,
115
+ elem_id="tts-prompt"
116
+ )
117
 
 
 
 
 
 
118
 
119
+ def update_helper_text(prompt_choice):
120
+ return positive_prompts.get(prompt_choice, '')
 
 
121
 
 
 
 
 
 
122
 
123
+ prompt.change(update_helper_text, outputs=["texts_samples"], queue=False)
124
 
125
  css = """
126
  #col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
 
163
  max-width: 15rem;
164
  height: 36px;
165
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
  """
 
167
  with gr.Blocks(css=css) as demo:
168
  with gr.Column(elem_id="col-container"):
169
  with gr.Row():
170
  with gr.Column():
 
 
 
 
171
 
 
172
  prompt = Dropdown(
173
  label="Text to speech prompt",
174
  choices=prompt_choices,
175
  elem_id="tts-prompt"
176
  )
177
 
178
+ audio_in = gr.Audio(
179
+ label="WAV voice to clone",
180
+ type="filepath",
181
+ source="upload"
182
+ )
183
+ clean_sample = gr.Checkbox(
184
+ label="Clean sample ?", value=False)
185
+ hidden_audio_numpy = gr.Audio(
186
+ type="numpy", visible=False)
187
+ submit_btn = gr.Button("Submit")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
  with gr.Column():
189
 
190
  cloned_out = gr.Audio(
 
204
 
205
  folder_path = gr.Textbox(visible=False)
206
 
207
+ audio_in.change(fn=wipe_npz_file, inputs=[folder_path], queue=False)
208
+ submit_btn.click(
209
+ fn=infer,
210
+ inputs=[
211
+ prompt,
212
+ audio_in,
213
+ clean_sample,
214
+ hidden_audio_numpy
215
+ ],
216
+ outputs=[
217
+ cloned_out,
218
+ video_out,
219
+ npz_file,
220
+ folder_path
221
+ ]
222
+ )
223
 
224
+ demo.queue(api_open=False, max_size=10).launch()