pedropauletti committed on
Commit
e2a9b8f
1 Parent(s): 02e9263

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -3
app.py CHANGED
@@ -9,15 +9,24 @@ history = ""
9
  last_answer = ""
10
 
11
  examples_audio_classification = [
12
- "content/crowd_laughing.mp3",
13
- "content/nature-ambient-sound.mp3",
14
  "content/talking-people.mp3",
15
  "content/miaow_16k.wav",
 
 
 
 
 
 
 
 
16
  ]
17
 
18
  examples_speech_recognition_en = [
19
  "content/speech1-en.wav",
20
  "content/speech2-en.wav",
 
 
 
21
  ]
22
  examples_speech_recognition_ptbr = [
23
  "content/speech1-ptbr.wav",
@@ -135,7 +144,7 @@ with gr.Blocks() as demo:
135
  inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [output])
136
  inputUpload.upload(transcribe_speech, [inputUpload, language], [output])
137
 
138
- examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="English Examples")
139
  # examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples")
140
 
141
  with gr.Row(visible=False) as chatbot_qa:
 
9
  last_answer = ""
10
 
11
  examples_audio_classification = [
 
 
12
  "content/talking-people.mp3",
13
  "content/miaow_16k.wav",
14
+ "content/birds-in-forest-lopp.wav",
15
+ "content/drumming-jungle-music.wav",
16
+ "content/driving-in-the-rain.wav",
17
+ "content/city-alert-siren.wav",
18
+ "content/small-group-applause.wav",
19
+ "content/angry-male-crowd-ambience.wav",
20
+ "content/slow-typing-on-a-keyboard.wav",
21
+ "content/emergency-car-arrival.wav"
22
  ]
23
 
24
  examples_speech_recognition_en = [
25
  "content/speech1-en.wav",
26
  "content/speech2-en.wav",
27
+ "content/speech1-ptbr.wav",
28
+ "content/speech2-ptbr.wav",
29
+ "content/speech3-ptbr.wav"
30
  ]
31
  examples_speech_recognition_ptbr = [
32
  "content/speech1-ptbr.wav",
 
144
  inputRecord.stop_recording(transcribe_speech, [inputRecord, language], [output])
145
  inputUpload.upload(transcribe_speech, [inputUpload, language], [output])
146
 
147
+ examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Examples")
148
  # examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples")
149
 
150
  with gr.Row(visible=False) as chatbot_qa: