IliaLarchenko committed on
Commit
3165477
1 Parent(s): 7158957

Added custom interview type

Browse files
resources/data.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  topic_lists = {
2
  "coding": [
3
  "Arrays",
@@ -112,6 +114,9 @@ topic_lists = {
112
  "Transfer Learning",
113
  "Explainable AI",
114
  ],
 
 
 
115
  }
116
 
117
  fixed_messages = {
 
1
+ interview_types = ["coding", "ml_design", "ml_theory", "system_design", "math", "sql", "custom"]
2
+
3
  topic_lists = {
4
  "coding": [
5
  "Arrays",
 
114
  "Transfer Learning",
115
  "Explainable AI",
116
  ],
117
+ "custom": [
118
+ "Specify any topic",
119
+ ],
120
  }
121
 
122
  fixed_messages = {
resources/prompts.py CHANGED
@@ -292,4 +292,7 @@ Provide specific feedback with examples from the interview, offering corrections
292
  Provide detailed feedback, highlighting strengths and areas where understanding is lacking, supported by specific examples from the interview. Suggest targeted resources or study areas to help candidates improve. Summarize key points at the end of your feedback, focusing on actionable steps for improvement and further learning.
293
  """
294
  ),
 
 
 
295
  }
 
292
  Provide detailed feedback, highlighting strengths and areas where understanding is lacking, supported by specific examples from the interview. Suggest targeted resources or study areas to help candidates improve. Summarize key points at the end of your feedback, focusing on actionable steps for improvement and further learning.
293
  """
294
  ),
295
+ "custom_problem_generation_prompt": base_problem_generation,
296
+ "custom_interviewer_prompt": base_interviewer,
297
+ "custom_grading_feedback_prompt": base_grading_feedback,
298
  }
tests/test_resources.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from resources.data import topic_lists, interview_types
2
+ from resources.prompts import prompts
3
+
4
+
5
def test_topics_completeness() -> None:
    """Verify that every declared interview type has a non-empty topic list."""
    # The two collections must describe exactly the same number of types;
    # the membership checks below then guarantee a one-to-one match.
    assert len(topic_lists) == len(interview_types)
    for kind in interview_types:
        # Every declared type needs at least one topic to draw from.
        assert kind in topic_lists
        assert len(topic_lists[kind]) > 0
14
+
15
+
16
def test_prompts_completeness() -> None:
    """Verify that each interview type has all three prompt variants defined."""
    suffixes = ("problem_generation_prompt", "interviewer_prompt", "grading_feedback_prompt")
    # Exactly three prompts per interview type — no missing and no extra entries.
    assert len(prompts) == len(interview_types) * len(suffixes)
    for kind in interview_types:
        for suffix in suffixes:
            assert f"{kind}_{suffix}" in prompts
ui/coding.py CHANGED
@@ -5,7 +5,7 @@ import os
5
  from itertools import chain
6
  import time
7
 
8
- from resources.data import fixed_messages, topic_lists
9
  from utils.ui import add_candidate_message, add_interviewer_message
10
  from typing import List, Dict, Generator, Optional, Tuple
11
  from functools import partial
@@ -127,7 +127,7 @@ def get_problem_solving_ui(llm: LLMManager, tts: TTSManager, stt: STTManager, de
127
  interview_type_select = gr.Dropdown(
128
  show_label=False,
129
  info="Type of the interview.",
130
- choices=["coding", "ml_design", "ml_theory", "system_design", "math", "sql"],
131
  value="coding",
132
  container=True,
133
  allow_custom_value=False,
 
5
  from itertools import chain
6
  import time
7
 
8
+ from resources.data import fixed_messages, topic_lists, interview_types
9
  from utils.ui import add_candidate_message, add_interviewer_message
10
  from typing import List, Dict, Generator, Optional, Tuple
11
  from functools import partial
 
127
  interview_type_select = gr.Dropdown(
128
  show_label=False,
129
  info="Type of the interview.",
130
+ choices=interview_types,
131
  value="coding",
132
  container=True,
133
  allow_custom_value=False,