Aki004 committed on
Commit
daec3ad
1 Parent(s): fe151c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -4
app.py CHANGED
@@ -5,6 +5,7 @@ import librosa
5
  import soundfile
6
  import io
7
  import argparse
 
8
  from inference.infer_tool import Svc
9
 
10
  def get_or_create_eventloop():
@@ -25,8 +26,8 @@ def tts_get_voices_list():
25
  return voices
26
 
27
  def tts_mode(txt, voice):
28
- tts = asyncio.run(edge_tts.Communicate(txt, voice).save('test.mp3'))
29
- audio, sr = librosa.load('test.mp3', sr=16000, mono=True)
30
  raw_path = io.BytesIO()
31
  soundfile.write(raw_path, audio, 16000, format="wav")
32
  raw_path.seek(0)
@@ -35,6 +36,23 @@ def tts_mode(txt, voice):
35
 
36
  return (44100, out_audio.cpu().numpy())
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  if __name__ == '__main__':
39
  parser = argparse.ArgumentParser()
40
  parser.add_argument('--device', type=str, default='cpu')
@@ -52,9 +70,20 @@ if __name__ == '__main__':
52
  '</div>')
53
  tts_text = gr.Textbox(label="TTS text (100 words limitation)", visible = True)
54
  tts_voice = gr.Dropdown(choices= tts_get_voices_list(), visible = True)
 
 
55
  audio_output = gr.Audio(label="Output Audio")
56
  btn_submit = gr.Button("Generate")
57
-
58
- btn_submit.click(tts_mode, [tts_text, tts_voice], [audio_output])
 
 
 
 
 
 
 
 
 
59
 
60
  app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
 
5
  import soundfile
6
  import io
7
  import argparse
8
+ import numpy as np
9
  from inference.infer_tool import Svc
10
 
11
  def get_or_create_eventloop():
 
26
  return voices
27
 
28
  def tts_mode(txt, voice):
29
+ tts = asyncio.run(edge_tts.Communicate(txt, voice).save('audio.mp3'))
30
+ audio, sr = librosa.load('audio.mp3', sr=16000, mono=True)
31
  raw_path = io.BytesIO()
32
  soundfile.write(raw_path, audio, 16000, format="wav")
33
  raw_path.seek(0)
 
36
 
37
  return (44100, out_audio.cpu().numpy())
38
 
39
def audio_infer_mode(input_audio):
    """Run SVC voice conversion on an uploaded audio clip.

    Parameters
    ----------
    input_audio : tuple[int, np.ndarray]
        ``(sampling_rate, samples)`` pair as delivered by ``gr.Audio``
        (integer PCM samples).

    Returns
    -------
    tuple[int, np.ndarray]
        ``(44100, converted_samples)`` for the ``gr.Audio`` output widget.
        NOTE(review): the model's actual output rate comes from its config —
        44100 here mirrors tts_mode; confirm it matches the checkpoint.
    """
    sampling_rate, audio = input_audio
    # Normalise integer PCM to float32 in [-1, 1].
    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)

    # Collapse multi-channel input to mono; librosa expects (channels, samples).
    if len(audio.shape) > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    # BUG FIX: librosa.resample takes `orig_sr`, not `org_sr` — the original
    # keyword raised TypeError for every input not already at 16 kHz.
    if sampling_rate != 16000:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)

    # Hand the model an in-memory WAV instead of a temp file on disk.
    raw_path = io.BytesIO()
    soundfile.write(raw_path, audio, 16000, format="wav")
    raw_path.seek(0)
    model = Svc("Herta-Svc/G_10000.pth", "Herta-Svc/config.json", device='cpu')
    out_audio, out_sr = model.infer('speaker0', 0, raw_path, auto_predict_f0=True)

    return (44100, out_audio.cpu().numpy())
55
+
56
  if __name__ == '__main__':
57
  parser = argparse.ArgumentParser()
58
  parser.add_argument('--device', type=str, default='cpu')
 
70
  '</div>')
71
  tts_text = gr.Textbox(label="TTS text (100 words limitation)", visible = True)
72
  tts_voice = gr.Dropdown(choices= tts_get_voices_list(), visible = True)
73
audio_mode = gr.Checkbox(label='Upload audio instead')
# Hidden until the checkbox is ticked; toggled via the change event below.
audio_input = gr.Audio(label='Input Audio', visible=False)
audio_output = gr.Audio(label="Output Audio")
btn_submit = gr.Button("Generate")

def toggle_audio_mode(use_audio):
    """Show the upload widget and hide the TTS controls when checked."""
    return (
        gr.Textbox.update(visible=not use_audio),
        gr.Dropdown.update(visible=not use_audio),
        gr.Audio.update(visible=use_audio),
    )

# BUG FIX: the original `if audio_mode.update == True:` compared a bound
# method object to True at build time — always False, so the widgets were
# never toggled. Visibility must react to the checkbox via a change event.
audio_mode.change(toggle_audio_mode, [audio_mode], [tts_text, tts_voice, audio_input])

def generate(txt, voice, use_audio, input_audio):
    """Dispatch to the TTS or upload pipeline based on the checkbox state."""
    if use_audio:
        return audio_infer_mode(input_audio)
    return tts_mode(txt, voice)

# BUG FIX: Button.click expects a single callable; the original passed a
# list `[tts_mode, audio_infer_mode]`, which Gradio rejects at setup.
btn_submit.click(generate, [tts_text, tts_voice, audio_mode, audio_input], [audio_output])
88
 
89
  app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)