pedropauletti committed
Commit 1ebd0cd
1 Parent(s): 7662900

Update app.py

Files changed (1):
  1. app.py +60 -5

app.py CHANGED
@@ -1,24 +1,48 @@
 import gradio as gr
 
-
 def to_audioClassification():
     return {
         audio_classification: gr.Row(visible=True),
         realtime_classification: gr.Row(visible=False),
+        speech_recognition: gr.Row(visible=False),
+        chatbot_qa: gr.Row(visible=False),
     }
 
 def to_realtimeAudioClassification():
     return {
         audio_classification: gr.Row(visible=False),
         realtime_classification: gr.Row(visible=True),
+        speech_recognition: gr.Row(visible=False),
+        chatbot_qa: gr.Row(visible=False),
+    }
+
+def to_speechRecognition():
+    return {
+        audio_classification: gr.Row(visible=False),
+        realtime_classification: gr.Row(visible=False),
+        speech_recognition: gr.Row(visible=True),
+        chatbot_qa: gr.Row(visible=False),
+    }
+
+def to_chatbot():
+    return {
+        audio_classification: gr.Row(visible=False),
+        realtime_classification: gr.Row(visible=False),
+        speech_recognition: gr.Row(visible=False),
+        chatbot_qa: gr.Row(visible=True),
     }
 
 
 with gr.Blocks() as demo:
 
+    with gr.Accordion("Settings", open=True):
+        language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True)
+
     with gr.Row():
-        btn0 = gr.Button("Audio Classification", scale=1, size='lg')
-        btn1 = gr.Button("Realtime Audio Classification", scale=1, size='lg')
+        btn0 = gr.Button("Audio Classification", scale=1, size='lg')
+        btn1 = gr.Button("Realtime Audio Classification", scale=1,size='lg')
+        btn2 = gr.Button("Speech Recognition", scale=1, size='lg')
+        btn3 = gr.Button("Help", scale=1, size='lg')
 
     with gr.Row(visible=False) as audio_classification:
         with gr.Column(min_width=700):
@@ -41,9 +65,40 @@ with gr.Blocks() as demo:
         with gr.Column(min_width=700):
             output = gr.Label(label="Audio Classification")
 
+    with gr.Row(visible=False) as speech_recognition:
+        with gr.Column(min_width=700):
+            with gr.Accordion("Record an Audio", open=True):
+                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
+            with gr.Accordion("Upload a file", open=False):
+                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
+            clearBtn = gr.ClearButton([inputRecord])
+        with gr.Column(min_width=700):
+            output = gr.Label(label="Transcription")
+
+
+    with gr.Row(visible=False) as chatbot_qa:
+        chatbot = gr.Chatbot(
+            [],
+            elem_id="chatbot",
+            bubble_full_width=False,
+            # avatar_images=(None, "/content/avatar-socialear.png"),
+            min_width=2000
+        )
+        with gr.Row(min_width=2000):
+            txt = gr.Textbox(
+                scale=4,
+                show_label=False,
+                placeholder="Enter text and press enter",
+                container=False,
+                min_width=1000
+            )
+            submit = gr.Button(value="", size='sm', scale=1)
+
 
-    btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification])
-    btn1.click(fn=to_realtimeAudioClassification, outputs=[audio_classification, realtime_classification ])
+    btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
+    btn1.click(fn=to_realtimeAudioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
+    btn2.click(fn=to_speechRecognition, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
+    btn3.click(fn=to_chatbot, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
 
 
 if __name__ == "__main__":
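
Note: the navigation pattern this commit extends is that each `to_*` helper returns a dict keyed by the container components, and every button lists all containers as `outputs`, so one click shows its panel and hides the rest. Below is a minimal, self-contained sketch of that pattern, using `gr.update(visible=...)`, the documented equivalent of returning a fresh `gr.Row(visible=...)`; the names (`show_first`, `panel_a`, etc.) are illustrative, not from the commit.

```python
import gradio as gr

# One handler per destination; each returns visibility updates for every panel.
def show_first():
    return {panel_a: gr.update(visible=True), panel_b: gr.update(visible=False)}

def show_second():
    return {panel_a: gr.update(visible=False), panel_b: gr.update(visible=True)}

with gr.Blocks() as sketch:
    with gr.Row():
        btn_a = gr.Button("First")
        btn_b = gr.Button("Second")
    with gr.Row(visible=False) as panel_a:
        gr.Markdown("panel one")
    with gr.Row(visible=False) as panel_b:
        gr.Markdown("panel two")
    # Both panels appear in `outputs`, so the returned dict can toggle each one.
    btn_a.click(fn=show_first, outputs=[panel_a, panel_b])
    btn_b.click(fn=show_second, outputs=[panel_a, panel_b])

if __name__ == "__main__":
    sketch.launch()
```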
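The commit adds the chatbot's `txt` textbox and `submit` button but wires no handlers for them in this diff. A hedged sketch of the typical wiring inside the `gr.Blocks()` context follows; `respond` is an assumed placeholder, not part of the commit, and assumes the Gradio 3.x tuple-style chat history that matches the `source="microphone"` API used above.

```python
# Hypothetical wiring for `txt` and `submit` (not in this commit).
# `respond` is a stand-in; the real app would call its QA backend here.
def respond(message, history):
    history = history + [(message, "placeholder answer")]
    return "", history  # clear the textbox, append the exchange

txt.submit(fn=respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
submit.click(fn=respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
```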