englissi committed
Commit bfef320
1 Parent(s): cc0fa32

Update app.py

Files changed (1):
  app.py +12 -4
app.py CHANGED
@@ -49,10 +49,10 @@ def next_question():
         full_text = f"{context} {question}"
         question_audio = speak_text(full_text)
         current_question += 1
-        return gr.update(value=question_audio, visible=True), gr.update(visible=True), questions[current_question-1]["label"], gr.update(visible=True), gr.update(visible=False)
+        return gr.update(value=question_audio, visible=True), gr.update(visible=True), questions[current_question-1]["label"], gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
     else:
         final_results = evaluate_responses()
-        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value=final_results, visible=True)
+        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value=final_results, visible=True), gr.update(visible=True)
 
 def save_response(audio):
     transcription = transcribe_audio(audio)
@@ -66,6 +66,12 @@ def evaluate_responses():
         result += f"<b>Q:</b> {question['question']}<br><b>Your Answer:</b> {user_response}<br><br>"
     return result
 
+def restart():
+    global current_question, responses
+    current_question = 0
+    responses = []
+    return load_first_question()
+
 with gr.Blocks() as demo:
     gr.Markdown("### Interactive Questions")
 
@@ -73,14 +79,16 @@ with gr.Blocks() as demo:
     audio_input = gr.Audio(type="filepath", label="Your answer", visible=True)
     transcription_output = gr.Textbox(label="Transcription", visible=True)
     btn_next = gr.Button("Next", visible=True)
+    btn_restart = gr.Button("Restart", visible=False)
     final_output = gr.HTML(visible=False)
 
     def load_first_question():
         return next_question()
 
-    demo.load(load_first_question, outputs=[question_audio, audio_input, transcription_output, btn_next, final_output])
+    demo.load(load_first_question, outputs=[question_audio, audio_input, transcription_output, btn_next, final_output, btn_restart])
 
-    btn_next.click(next_question, outputs=[question_audio, audio_input, transcription_output, btn_next, final_output])
+    btn_next.click(next_question, outputs=[question_audio, audio_input, transcription_output, btn_next, final_output, btn_restart])
     audio_input.change(save_response, inputs=audio_input, outputs=transcription_output)
+    btn_restart.click(restart, outputs=[question_audio, audio_input, transcription_output, btn_next, final_output, btn_restart])
 
 demo.launch()
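
The change itself is small: next_question() now returns a sixth gr.update(...) so the new Restart button stays hidden while questions are being asked and appears together with the results, restart() resets the module-level current_question/responses state, and every event hook lists btn_restart as an extra output. Below is a minimal, self-contained sketch of that wiring, not the original app: the questions list is invented, the TTS/transcription pipeline (speak_text, transcribe_audio, the Audio input) is replaced by plain Textboxes so it runs without an audio backend, and the Textbox submit event stands in for the original audio_input.change.

import gradio as gr

# Invented stand-in data; the real app.py builds `questions` elsewhere and
# uses speak_text()/transcribe_audio() for TTS and speech recognition.
questions = [
    {"question": "What is your name?", "label": "Say your name."},
    {"question": "Where do you live?", "label": "Say where you live."},
]
current_question = 0
responses = []

def next_question():
    global current_question
    if current_question < len(questions):
        q = questions[current_question]["question"]
        current_question += 1
        # One value per output: question, answer box, hint/transcription,
        # Next button, results panel, Restart button (hidden while playing).
        return (gr.update(value=q, visible=True), gr.update(visible=True),
                questions[current_question - 1]["label"], gr.update(visible=True),
                gr.update(visible=False), gr.update(visible=False))
    else:
        results = "".join(
            f"<b>Q:</b> {q['question']}<br><b>Your Answer:</b> {a}<br><br>"
            for q, a in zip(questions, responses))
        # Hide the quiz widgets, show the results and the Restart button.
        return (gr.update(visible=False), gr.update(visible=False),
                gr.update(visible=False), gr.update(visible=False),
                gr.update(value=results, visible=True), gr.update(visible=True))

def save_response(text):
    responses.append(text)
    return text  # echoed where the real app shows the transcription

def restart():
    # Mirrors the commit: reset module-level state, then reload question 1.
    global current_question, responses
    current_question = 0
    responses = []
    return next_question()

with gr.Blocks() as demo:
    gr.Markdown("### Interactive Questions")
    question_box = gr.Textbox(label="Question", visible=True)
    answer_input = gr.Textbox(label="Your answer", visible=True)
    transcription_output = gr.Textbox(label="Transcription", visible=True)
    btn_next = gr.Button("Next", visible=True)
    btn_restart = gr.Button("Restart", visible=False)
    final_output = gr.HTML(visible=False)

    outputs = [question_box, answer_input, transcription_output,
               btn_next, final_output, btn_restart]
    demo.load(next_question, outputs=outputs)
    btn_next.click(next_question, outputs=outputs)
    # The real app wires audio_input.change; for a Textbox, submit (Enter) is
    # the closer analogue so a response is saved once per answer.
    answer_input.submit(save_response, inputs=answer_input, outputs=transcription_output)
    btn_restart.click(restart, outputs=outputs)

demo.launch()

The pattern to note is that every handler attached to the same outputs list must return exactly one value (or gr.update) per component, which is why both branches of next_question(), and the new restart(), each return six values after this commit.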