cocktailpeanut committed on
Commit 509f50e
1 Parent(s): e383d75
Files changed (1)
  1. app.py +2 -12
app.py CHANGED
@@ -11,37 +11,27 @@ elif torch.backends.mps.is_available():
     device = "mps"
 else:
     device = "cpu"
-#device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
 languages = ["EN", "ES", "FR", "ZH", "JP", "KR"]
 en = ["EN-Default", "EN-US", "EN-BR", "EN_INDIA", "EN-AU"]
 
 LANG = sys.argv[1]
 
-#model = TTS(language='EN', device=device)
-def synthesize(language, speaker, text, speed=1.0, progress=gr.Progress()):
-    model = TTS(language=language, device=device)
+def synthesize(speaker, text, speed=1.0, progress=gr.Progress()):
+    model = TTS(language=LANG, device=device)
     speaker_ids = model.hps.data.spk2id
     bio = io.BytesIO()
     model.tts_to_file(text, speaker_ids[speaker], bio, speed=speed, pbar=progress.tqdm, format='wav')
     return bio.getvalue()
 
-#def lang(language):
-# if language == "EN":
-# return gr.update(choices=en, value="EN-Default")
-# else:
-# return gr.update(choices=[language], value=language)
 with gr.Blocks() as demo:
-    gr.Markdown('# MeloTTS\n\nAn unofficial demo of [MeloTTS](https://github.com/myshell-ai/MeloTTS) from MyShell AI. MeloTTS is a permissively licensed (MIT) SOTA multi-speaker TTS model.\n\nI am not affiliated with MyShell AI in any way.\n\nThis demo currently only supports English, but the model itself supports other languages.')
     with gr.Group():
-# language = gr.Dropdown(languages, interactive=True, value='EN', label='Language')
         if LANG == "EN":
             speaker = gr.Dropdown(en, interactive=True, value='EN-Default', label='Speaker')
         else:
             speaker = gr.Dropdown([LANG], interactive=True, value='EN-Default', label='Speaker')
         speed = gr.Slider(label='Speed', minimum=0.1, maximum=10.0, value=1.0, interactive=True, step=0.1)
         text = gr.Textbox(label="Text to speak", value='The field of text to speech has seen rapid development recently')
-# language.change(fn=lang, inputs=[language], outputs=[speaker])
         btn = gr.Button('Synthesize', variant='primary')
         aud = gr.Audio(interactive=False)
         btn.click(synthesize, inputs=[language, speaker, text, speed], outputs=[aud])
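
Since LANG is read from sys.argv[1], the demo is presumably launched with one of the codes in languages as the first command-line argument, e.g. python app.py EN. Below is a minimal standalone sketch of the synthesis path that synthesize() now uses, shown for illustration only; it assumes the TTS class comes from melo.api (the app's import block sits above this hunk and is not shown) and that 'EN-Default' is a valid spk2id key.

# Illustrative sketch, not part of the commit: the same synthesis path as
# synthesize(), with a fixed language and without the Gradio progress bar.
import io
from melo.api import TTS  # assumed import; app.py's imports sit above this hunk

device = "cpu"  # app.py selects "cuda" / "mps" / "cpu" at startup
model = TTS(language="EN", device=device)
speaker_ids = model.hps.data.spk2id  # maps speaker names (e.g. 'EN-Default') to ids

bio = io.BytesIO()
model.tts_to_file("The field of text to speech has seen rapid development recently",
                  speaker_ids["EN-Default"], bio, speed=1.0, format='wav')
wav_bytes = bio.getvalue()  # raw WAV bytes, as synthesize() returns them to gr.Audio

Note that the unchanged context line btn.click(synthesize, inputs=[language, speaker, text, speed], outputs=[aud]) still lists language among the inputs, while the reworked synthesize(speaker, text, ...) no longer accepts it; the click wiring would presumably need a matching follow-up change.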
 