IliaLarchenko committed
Commit e71ef7a • 1 Parent(s): 8d3b67a

Added read chat after each chat message
app.py CHANGED
@@ -139,11 +139,13 @@ with gr.Blocks(title="AI Interviewer") as demo:
     feedback = gr.Markdown()
 
     # Events
-    coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat, started_coding], outputs=[chat])
+    coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat, started_coding], outputs=[chat]).then(
+        fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
+    )
 
     start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).then(
         fn=lambda: True, outputs=[started_coding]
-    ).then(fn=hide_settings, outputs=[init_acc, start_btn]).then(
+    ).then(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).then(fn=hide_settings, outputs=[init_acc, start_btn]).then(
         fn=llm.get_problem,
         inputs=[requirements, difficulty_select, topic_select],
         outputs=[description],
@@ -159,8 +161,10 @@ with gr.Blocks(title="AI Interviewer") as demo:
         inputs=[chat],
         outputs=[chat],
     ).then(
-        fn=hide_solution, outputs=[solution_acc, end_btn, problem_acc, audio_input]
-    ).then(fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback])
+        fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
+    ).then(fn=hide_solution, outputs=[solution_acc, end_btn, problem_acc, audio_input]).then(
+        fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback]
+    )
 
     audio_input.stop_recording(fn=stt.speech_to_text, inputs=[audio_input], outputs=[message]).then(
         fn=lambda: None, outputs=[audio_input]
@@ -168,8 +172,8 @@ with gr.Blocks(title="AI Interviewer") as demo:
         fn=llm.send_request,
         inputs=[code, previous_code, message, chat_history, chat],
         outputs=[chat_history, chat, message, previous_code],
+    ).then(
+        fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
     )
 
-    chat.change(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output])
-
 demo.launch(show_api=False)
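
For reference, the pattern this commit applies is Gradio's event chaining: instead of one chat.change() listener that reads every chat update aloud, each event that appends an interviewer message gets its own .then(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]) step. Below is a minimal, self-contained sketch of that pattern; add_message and read_last_message are placeholder stand-ins for the Space's own helpers, not its actual code.

import gradio as gr

def add_message(history):
    # Placeholder for the app's add_interviewer_message(...): append a bot turn.
    return history + [(None, "Hello! Let's start the interview.")]

def read_last_message(history):
    # Placeholder for tts.read_last_message: in the real app this would return
    # synthesized audio (e.g. a file path or (sample_rate, waveform) tuple) for
    # the last interviewer message; here it returns None, i.e. no audio.
    return None

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    audio_output = gr.Audio(label="Interviewer voice")
    start_btn = gr.Button("Start")

    # The chat update and the TTS read run as two chained steps: the .then()
    # step fires only after the click handler has updated the chat.
    start_btn.click(fn=add_message, inputs=[chat], outputs=[chat]).then(
        fn=read_last_message, inputs=[chat], outputs=[audio_output]
    )

demo.launch(show_api=False)

Tying the read step to each chain, rather than to a global chat.change listener, presumably lets the app decide exactly which updates are read aloud and where the TTS call sits relative to the other .then() steps (hiding settings, fetching the problem, ending the interview).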