IliaLarchenko committed
Commit 81f0a03
Parent: ab840dc

Split UI code

Files changed (4):
  1. app.py +10 -159
  2. ui/coding.py +125 -0
  3. ui/instructions.py +45 -0
  4. utils/params.py +9 -0
app.py CHANGED
@@ -1,179 +1,30 @@
 import os
 
 import gradio as gr
-import numpy as np
 
 from api.audio import STTManager, TTSManager
 from api.llm import LLMManager
 from config import config
 from docs.instruction import instruction
-from resources.data import fixed_messages, topics_list
 from resources.prompts import prompts
-from utils.ui import add_candidate_message, add_interviewer_message, get_status_color
+from ui.coding import get_codding_ui
+from ui.instructions import get_instructions_ui
+from utils.params import default_audio_params
 
 llm = LLMManager(config, prompts)
 tts = TTSManager(config)
 stt = STTManager(config)
 
+default_audio_params["streaming"] = stt.streaming
+
 # Interface
-with gr.Blocks(title="AI Interviewer") as demo:
-    if os.getenv("IS_DEMO"):
-        gr.Markdown(instruction["demo"])
 
-    started_coding = gr.State(False)
+with gr.Blocks(title="AI Interviewer") as demo:
     audio_output = gr.Audio(label="Play audio", autoplay=True, visible=os.environ.get("DEBUG", False), streaming=tts.streaming)
-    with gr.Tab("Instruction") as instruction_tab:
-        with gr.Row():
-            with gr.Column(scale=2):
-                gr.Markdown(instruction["introduction"])
-            with gr.Column(scale=1):
-                space = " " * 10
-
-                tts_status = get_status_color(tts)
-                gr.Markdown(f"TTS status: {tts_status}{space}{config.tts.name}")
-
-                stt_status = get_status_color(stt)
-                gr.Markdown(f"STT status: {stt_status}{space}{config.stt.name}")
-
-                llm_status = get_status_color(llm)
-                gr.Markdown(f"LLM status: {llm_status}{space}{config.llm.name}")
-
-        gr.Markdown(instruction["quick_start"])
-        with gr.Row():
-            with gr.Column(scale=2):
-                gr.Markdown(instruction["interface"])
-            with gr.Column(scale=1):
-                gr.Markdown("Bot interaction area will look like this. Use Record button to record your answer.")
-                gr.Markdown("Click 'Send' to send you answer and get a reply.")
-                chat_example = gr.Chatbot(
-                    label="Chat", show_label=False, show_share_button=False, value=[["Candidate message", "Interviewer message"]]
-                )
-                default_audio_params = {
-                    "label": "Record answer",
-                    "sources": ["microphone"],
-                    "type": "numpy",
-                    "waveform_options": {"show_controls": False},
-                    "editable": False,
-                    "container": False,
-                    "show_share_button": False,
-                    "streaming": stt.streaming,
-                }
-                send_btn_example = gr.Button("Send", interactive=False)
-                audio_input_example = gr.Audio(interactive=True, **default_audio_params)
-        gr.Markdown(instruction["models"])
-        gr.Markdown(instruction["acknowledgements"])
-        gr.Markdown(instruction["legal"])
-
-    with gr.Tab("Coding") as coding_tab:
-        chat_history = gr.State([])
-        previous_code = gr.State("")
-        with gr.Accordion("Settings") as init_acc:
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("##### Problem settings")
-                    with gr.Row():
-                        gr.Markdown("Difficulty")
-                        difficulty_select = gr.Dropdown(
-                            label="Select difficulty",
-                            choices=["Easy", "Medium", "Hard"],
-                            value="Medium",
-                            container=False,
-                            allow_custom_value=True,
-                        )
-                    with gr.Row():
-                        gr.Markdown("Topic (can type custom value)")
-                        topic_select = gr.Dropdown(
-                            label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
-                        )
-                with gr.Column(scale=2):
-                    requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
-                    start_btn = gr.Button("Generate a problem")
-
-        with gr.Accordion("Problem statement", open=True) as problem_acc:
-            description = gr.Markdown()
-        with gr.Accordion("Solution", open=False) as solution_acc:
-            with gr.Row() as content:
-                with gr.Column(scale=2):
-                    code = gr.Code(
-                        label="Please write your code here. You can use any language, but only Python syntax highlighting is available.",
-                        language="python",
-                        lines=46,
-                    )
-                with gr.Column(scale=1):
-                    end_btn = gr.Button("Finish the interview", interactive=False)
-                    chat = gr.Chatbot(label="Chat", show_label=False, show_share_button=False)
-                    message = gr.Textbox(
-                        label="Message",
-                        placeholder="Your message will appear here",
-                        show_label=False,
-                        lines=3,
-                        max_lines=3,
-                        interactive=False,
-                    )
-                    send_btn = gr.Button("Send", interactive=False)
-                    audio_input = gr.Audio(interactive=False, **default_audio_params)
-
-        audio_buffer = gr.State(np.array([], dtype=np.int16))
-        transcript = gr.State({"words": [], "not_confirmed": 0, "last_cutoff": 0, "text": ""})
-
-        with gr.Accordion("Feedback", open=True) as feedback_acc:
-            feedback = gr.Markdown()
-
-        # Events
-        coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat, started_coding], outputs=[chat]).success(
-            fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
-        )
-
-        start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).success(
-            fn=lambda: True, outputs=[started_coding]
-        ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
-            fn=lambda: (gr.update(open=False), gr.update(interactive=False)), outputs=[init_acc, start_btn]
-        ).success(
-            fn=llm.get_problem,
-            inputs=[requirements, difficulty_select, topic_select],
-            outputs=[description],
-            scroll_to_output=True,
-        ).success(
-            fn=llm.init_bot, inputs=[description], outputs=[chat_history]
-        ).success(
-            fn=lambda: (gr.update(open=True), gr.update(interactive=True), gr.update(interactive=True)),
-            outputs=[solution_acc, end_btn, audio_input],
-        )
-
-        end_btn.click(
-            fn=add_interviewer_message(fixed_messages["end"]),
-            inputs=[chat],
-            outputs=[chat],
-        ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
-            fn=lambda: (gr.update(open=False), gr.update(interactive=False), gr.update(open=False), gr.update(interactive=False)),
-            outputs=[solution_acc, end_btn, problem_acc, audio_input],
-        ).success(
-            fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback]
-        )
-
-        send_btn.click(fn=add_candidate_message, inputs=[message, chat], outputs=[chat]).success(fn=lambda: None, outputs=[message]).success(
-            fn=llm.send_request,
-            inputs=[code, previous_code, chat_history, chat],
-            outputs=[chat_history, chat, previous_code],
-        ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
-            fn=lambda: gr.update(interactive=False), outputs=[send_btn]
-        ).success(
-            fn=lambda: np.array([], dtype=np.int16), outputs=[audio_buffer]
-        ).success(
-            fn=lambda: {"words": [], "not_confirmed": 0, "last_cutoff": 0, "text": ""}, outputs=[transcript]
-        )
+    instructions_tab = get_instructions_ui(llm, tts, stt, default_audio_params)
+    coding_tab = get_codding_ui(llm, tts, stt, default_audio_params, audio_output)
 
-        if stt.streaming:
-            audio_input.stream(
-                stt.process_audio_chunk,
-                inputs=[audio_input, audio_buffer, transcript],
-                outputs=[transcript, audio_buffer, message],
-                show_progress="hidden",
-            )
-            audio_input.stop_recording(fn=lambda: gr.update(interactive=True), outputs=[send_btn])
-        else:
-            audio_input.stop_recording(fn=stt.speech_to_text_full, inputs=[audio_input], outputs=[message]).success(
-                fn=lambda: gr.update(interactive=True), outputs=[send_btn]
-            ).success(fn=lambda: None, outputs=[audio_input])
+    instructions_tab.render()
+    coding_tab.render()
 
 demo.launch(show_api=False)
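
The new app.py relies on Gradio's deferred-rendering pattern: each tab is built with render=False inside its own module and then attached to the page with .render(). Below is a minimal, self-contained sketch of that pattern; it is not part of this commit, and the component names are made up for illustration.

import gradio as gr


def build_tab() -> gr.Tab:
    # render=False creates the tab and its children without attaching them to any page yet.
    with gr.Tab("Example", render=False) as tab:
        gr.Markdown("Content defined in a separate module")
    return tab


with gr.Blocks() as demo:
    tab = build_tab()  # construct the tab outside the layout definition
    tab.render()       # attach it to the page at this position

if __name__ == "__main__":
    demo.launch()

With this split, app.py is reduced to wiring: it creates the manager objects and the shared audio_output component, then delegates the tab layouts to ui/instructions.py and ui/coding.py.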
ui/coding.py ADDED
@@ -0,0 +1,125 @@
+import gradio as gr
+import numpy as np
+
+from resources.data import fixed_messages, topics_list
+from utils.ui import add_candidate_message, add_interviewer_message
+
+
+def get_codding_ui(llm, tts, stt, default_audio_params, audio_output):
+    with gr.Tab("Coding", render=False) as coding_tab:
+        chat_history = gr.State([])
+        previous_code = gr.State("")
+        started_coding = gr.State(False)
+        with gr.Accordion("Settings") as init_acc:
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("##### Problem settings")
+                    with gr.Row():
+                        gr.Markdown("Difficulty")
+                        difficulty_select = gr.Dropdown(
+                            label="Select difficulty",
+                            choices=["Easy", "Medium", "Hard"],
+                            value="Medium",
+                            container=False,
+                            allow_custom_value=True,
+                        )
+                    with gr.Row():
+                        gr.Markdown("Topic (can type custom value)")
+                        topic_select = gr.Dropdown(
+                            label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
+                        )
+                with gr.Column(scale=2):
+                    requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
+                    start_btn = gr.Button("Generate a problem")
+
+        with gr.Accordion("Problem statement", open=True) as problem_acc:
+            description = gr.Markdown()
+        with gr.Accordion("Solution", open=False) as solution_acc:
+            with gr.Row() as content:
+                with gr.Column(scale=2):
+                    code = gr.Code(
+                        label="Please write your code here. You can use any language, but only Python syntax highlighting is available.",
+                        language="python",
+                        lines=46,
+                    )
+                with gr.Column(scale=1):
+                    end_btn = gr.Button("Finish the interview", interactive=False)
+                    chat = gr.Chatbot(label="Chat", show_label=False, show_share_button=False)
+                    message = gr.Textbox(
+                        label="Message",
+                        placeholder="Your message will appear here",
+                        show_label=False,
+                        lines=3,
+                        max_lines=3,
+                        interactive=False,
+                    )
+                    send_btn = gr.Button("Send", interactive=False)
+                    audio_input = gr.Audio(interactive=False, **default_audio_params)
+
+        audio_buffer = gr.State(np.array([], dtype=np.int16))
+        transcript = gr.State({"words": [], "not_confirmed": 0, "last_cutoff": 0, "text": ""})
+
+        with gr.Accordion("Feedback", open=True) as feedback_acc:
+            feedback = gr.Markdown()
+
+        start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).success(
+            fn=lambda: True, outputs=[started_coding]
+        ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
+            fn=lambda: (gr.update(open=False), gr.update(interactive=False)), outputs=[init_acc, start_btn]
+        ).success(
+            fn=llm.get_problem,
+            inputs=[requirements, difficulty_select, topic_select],
+            outputs=[description],
+            scroll_to_output=True,
+        ).success(
+            fn=llm.init_bot, inputs=[description], outputs=[chat_history]
+        ).success(
+            fn=lambda: (gr.update(open=True), gr.update(interactive=True), gr.update(interactive=True)),
+            outputs=[solution_acc, end_btn, audio_input],
+        )
+
+        end_btn.click(
+            fn=add_interviewer_message(fixed_messages["end"]),
+            inputs=[chat],
+            outputs=[chat],
+        ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
+            fn=lambda: (gr.update(open=False), gr.update(interactive=False), gr.update(open=False), gr.update(interactive=False)),
+            outputs=[solution_acc, end_btn, problem_acc, audio_input],
+        ).success(
+            fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback]
+        )
+
+        send_btn.click(fn=add_candidate_message, inputs=[message, chat], outputs=[chat]).success(
+            fn=lambda: None, outputs=[message]
+        ).success(
+            fn=llm.send_request,
+            inputs=[code, previous_code, chat_history, chat],
+            outputs=[chat_history, chat, previous_code],
+        ).success(
+            fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
+        ).success(
+            fn=lambda: gr.update(interactive=False), outputs=[send_btn]
+        ).success(
+            fn=lambda: np.array([], dtype=np.int16), outputs=[audio_buffer]
+        ).success(
+            fn=lambda: {"words": [], "not_confirmed": 0, "last_cutoff": 0, "text": ""}, outputs=[transcript]
+        )
+
+        if stt.streaming:
+            audio_input.stream(
+                stt.process_audio_chunk,
+                inputs=[audio_input, audio_buffer, transcript],
+                outputs=[transcript, audio_buffer, message],
+                show_progress="hidden",
+            )
+            audio_input.stop_recording(fn=lambda: gr.update(interactive=True), outputs=[send_btn])
+        else:
+            audio_input.stop_recording(fn=stt.speech_to_text_full, inputs=[audio_input], outputs=[message]).success(
+                fn=lambda: gr.update(interactive=True), outputs=[send_btn]
+            ).success(fn=lambda: None, outputs=[audio_input])
+
+        coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat, started_coding], outputs=[chat]).success(
+            fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
+        )
+
+    return coding_tab
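
The event wiring carried over from app.py keeps the same chained style: each listener returns a dependency whose .success() step runs only if the previous callback finished without raising. A tiny illustrative sketch of that pattern (hypothetical components, not from this repo):

import gradio as gr

with gr.Blocks() as demo:
    status = gr.Textbox(label="Status")
    run_btn = gr.Button("Run")

    # Step 2 fires only after step 1 completes successfully.
    run_btn.click(fn=lambda: "step 1 done", outputs=[status]).success(
        fn=lambda text: text + " -> step 2 done", inputs=[status], outputs=[status]
    )

demo.launch()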
ui/instructions.py ADDED
@@ -0,0 +1,45 @@
+import os
+
+import gradio as gr
+
+from docs.instruction import instruction
+from utils.ui import get_status_color
+
+
+def get_instructions_ui(llm, tts, stt, default_audio_params):
+    with gr.Tab("Instruction", render=False) as instruction_tab:
+        if os.getenv("IS_DEMO"):
+            gr.Markdown(instruction["demo"])
+
+        with gr.Row():
+            with gr.Column(scale=2):
+                gr.Markdown(instruction["introduction"])
+            with gr.Column(scale=1):
+                space = " " * 10
+
+                tts_status = get_status_color(tts)
+                gr.Markdown(f"TTS status: {tts_status}{space}{tts.config.tts.name}")
+
+                stt_status = get_status_color(stt)
+                gr.Markdown(f"STT status: {stt_status}{space}{stt.config.stt.name}")
+
+                llm_status = get_status_color(llm)
+                gr.Markdown(f"LLM status: {llm_status}{space}{llm.config.llm.name}")
+
+        gr.Markdown(instruction["quick_start"])
+        with gr.Row():
+            with gr.Column(scale=2):
+                gr.Markdown(instruction["interface"])
+            with gr.Column(scale=1):
+                gr.Markdown("Bot interaction area will look like this. Use Record button to record your answer.")
+                gr.Markdown("Click 'Send' to send you answer and get a reply.")
+                chat_example = gr.Chatbot(
+                    label="Chat", show_label=False, show_share_button=False, value=[["Candidate message", "Interviewer message"]]
+                )
+                send_btn_example = gr.Button("Send", interactive=False)
+                audio_input_example = gr.Audio(interactive=True, **default_audio_params)
+        gr.Markdown(instruction["models"])
+        gr.Markdown(instruction["acknowledgements"])
+        gr.Markdown(instruction["legal"])
+
+    return instruction_tab
utils/params.py ADDED
@@ -0,0 +1,9 @@
+default_audio_params = {
+    "label": "Record answer",
+    "sources": ["microphone"],
+    "type": "numpy",
+    "waveform_options": {"show_controls": False},
+    "editable": False,
+    "container": False,
+    "show_share_button": False,
+}
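
Note that utils/params.py does not hard-code the "streaming" key: app.py fills it in once at startup from the STT backend, and both tabs then unpack the same dict into their gr.Audio components. A short sketch of that flow, using a stand-in boolean instead of the real stt.streaming:

import gradio as gr

from utils.params import default_audio_params

# app.py sets this once from the STT manager; True is a stand-in here.
default_audio_params["streaming"] = True

with gr.Blocks() as demo:
    # Every tab reuses the same keyword arguments for its recorder widget.
    audio_input = gr.Audio(interactive=False, **default_audio_params)

demo.launch()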