Ionut-Bostan committed on
Commit
0dc9f9f
1 Parent(s): bc664bd

update app

Browse files
Files changed (1) hide show
  1. app.py +11 -12
app.py CHANGED
@@ -10,7 +10,6 @@ predefined_texts = [
10
  emotion_mapping = {"amused": 0, "anger": 1,
11
  "disgust": 2, "neutral": 3, "sleepiness": 4}
12
 
13
-
14
  def synthesize_speech(input_type, text, own_text, speaker_id, embed_type, emotion_id):
15
  if input_type == "Choose from examples":
16
  selected_text = text
@@ -26,20 +25,20 @@ def synthesize_speech(input_type, text, own_text, speaker_id, embed_type, emotio
26
  audio_file = f'output/result/EmoV_DB/{selected_text}.wav'
27
  return audio_file
28
 
 
 
 
 
 
 
 
29
 
30
  iface = gr.Interface(
31
  fn=synthesize_speech,
32
- inputs=[
33
- gr.inputs.Radio(
34
- ["Choose from examples", "Enter your own text"], label="Input Type"),
35
- gr.inputs.Dropdown(choices=predefined_texts, label="Select a text"),
36
- gr.inputs.Textbox(lines=2, label="Enter your own text"),
37
- gr.inputs.Slider(minimum=0, maximum=3, step=1,
38
- default=0, label="Speaker ID"),
39
- gr.inputs.Radio(["bert_embed", "emotion_id"], label="Embedding Type"),
40
- gr.inputs.Dropdown(choices=emotion_mapping, label="Select Emotion"),
41
- ],
42
- outputs=gr.outputs.Audio(type="filepath"),
43
  title="Text-to-Speech Demo",
 
44
  )
 
45
  iface.launch()
 
10
  emotion_mapping = {"amused": 0, "anger": 1,
11
  "disgust": 2, "neutral": 3, "sleepiness": 4}
12
 
 
13
  def synthesize_speech(input_type, text, own_text, speaker_id, embed_type, emotion_id):
14
  if input_type == "Choose from examples":
15
  selected_text = text
 
25
  audio_file = f'output/result/EmoV_DB/{selected_text}.wav'
26
  return audio_file
27
 
28
+ input_type = gr.Radio(
29
+ choices=["Choose from examples", "Enter your own text"], label="Input Type")
30
+ text = gr.Dropdown(choices=predefined_texts, label="Select a text")
31
+ own_text = gr.Textbox(lines=2, label="Enter your own text")
32
+ speaker_id = gr.Slider(minimum=0, maximum=3, step=1, default=0, label="Speaker ID")
33
+ embed_type = gr.Radio(choices=["bert_embed", "emotion_id"], label="Embedding Type")
34
+ emotion_id = gr.Dropdown(choices=list(emotion_mapping.keys()), label="Select Emotion")
35
 
36
  iface = gr.Interface(
37
  fn=synthesize_speech,
38
+ inputs=[input_type, text, own_text, speaker_id, embed_type, emotion_id],
39
+ outputs=gr.Audio(type="filepath"),
 
 
 
 
 
 
 
 
 
40
  title="Text-to-Speech Demo",
41
+ description="Select or enter text and configure options to synthesize speech."
42
  )
43
+
44
  iface.launch()