IliaLarchenko committed on
Commit
a10bc68
1 Parent(s): 1a47458

Refactoring

Browse files
Files changed (4) hide show
  1. app.py +42 -20
  2. llm.py +28 -77
  3. prompts.py +24 -0
  4. requirements.txt +1 -0
app.py CHANGED
@@ -2,43 +2,65 @@ import gradio as gr
2
 
3
  from llm import end_interview, get_problem, send_request
4
 
 
 
 
 
 
5
  with gr.Blocks() as demo:
6
  gr.Markdown("Your coding interview practice AI assistant!")
7
  with gr.Tab("Coding"):
8
  chat_history = gr.State([])
9
  previous_code = gr.State("")
 
10
  with gr.Accordion("Settings") as init_acc:
11
- requirements = gr.Textbox(
12
- label="Requirements",
13
- placeholder=(
14
- "Write any requirements here in a plain text: topic, difficulty, complexity, etc. "
15
- "Or keep it blank to just get a random question."
16
- ),
17
- )
18
- # TODO: select language
19
- # TODO: select difficulty
20
- # TODO: select topic
 
 
 
 
 
 
 
 
 
 
21
  # TODO: select LLM model
22
- start_btn = gr.Button("Start")
23
  with gr.Accordion("Solution", open=True) as solution_acc:
24
- # TODO: auto open close
25
- with gr.Accordion("Problem description", open=True) as solution_acc:
26
- description = gr.Markdown()
27
  with gr.Row() as content:
28
  with gr.Column(scale=2):
29
- code = gr.Code(label="Solution", language="python", lines=20)
 
 
 
30
  message = gr.Textbox(label="Message", lines=1)
31
- answer_btn = gr.Button("Send message")
32
  with gr.Column(scale=1):
33
  chat = gr.Chatbot(label="Chat history")
34
  end_btn = gr.Button("Finish the interview")
35
  with gr.Accordion("Feedback", open=True) as feedback_acc:
36
  feedback = gr.Markdown()
37
 
38
- start_btn.click(fn=get_problem, inputs=requirements, outputs=[description, chat_history], scroll_to_output=True)
39
- answer_btn.click(
40
- fn=send_request, inputs=[code, previous_code, message, chat_history, chat], outputs=[chat_history, chat, message, previous_code]
 
 
 
 
 
 
 
41
  )
42
- end_btn.click(fn=end_interview, inputs=chat_history, outputs=feedback)
43
 
44
  demo.launch()
 
2
 
3
  from llm import end_interview, get_problem, send_request
4
 
5
+ languages_list = ["python", "javascript", "html", "css", "typescript", "dockerfile", "shell", "r", "sql"] # limited by gradio for now
6
+ topics_list = ["Arrays", "Strings", "Linked Lists"]
7
+ models = ["gpt-3.5-turbo"]
8
+
9
+
10
  with gr.Blocks() as demo:
11
  gr.Markdown("Your coding interview practice AI assistant!")
12
  with gr.Tab("Coding"):
13
  chat_history = gr.State([])
14
  previous_code = gr.State("")
15
+ client = gr.State(None)
16
  with gr.Accordion("Settings") as init_acc:
17
+ with gr.Row():
18
+ with gr.Column():
19
+ gr.Markdown("Difficulty")
20
+ difficulty_select = gr.Dropdown(
21
+ label="Select difficulty", choices=["Easy", "Medium", "Hard"], value="Medium", container=False
22
+ )
23
+
24
+ gr.Markdown("Topic")
25
+ topic_select = gr.Dropdown(
26
+ label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
27
+ )
28
+
29
+ gr.Markdown("Select LLM model to use")
30
+ model_select = gr.Dropdown(label="Select model", choices=models, value="gpt-3.5-turbo", container=False)
31
+ with gr.Column():
32
+ requirements = gr.Textbox(
33
+ label="Requirements", placeholder="Specify requirements: topic, difficulty, language, etc.", lines=5
34
+ )
35
+ start_btn = gr.Button("Start")
36
+
37
  # TODO: select LLM model
 
38
  with gr.Accordion("Solution", open=True) as solution_acc:
39
+ description = gr.Markdown()
 
 
40
  with gr.Row() as content:
41
  with gr.Column(scale=2):
42
+ language_select = gr.Dropdown(
43
+ label="Select language", choices=languages_list, value="python", container=False, interactive=True
44
+ )
45
+ code = gr.Code(label="Solution", language=language_select.value, lines=20)
46
  message = gr.Textbox(label="Message", lines=1)
 
47
  with gr.Column(scale=1):
48
  chat = gr.Chatbot(label="Chat history")
49
  end_btn = gr.Button("Finish the interview")
50
  with gr.Accordion("Feedback", open=True) as feedback_acc:
51
  feedback = gr.Markdown()
52
 
53
+ start_btn.click(
54
+ fn=get_problem,
55
+ inputs=[requirements, difficulty_select, topic_select, model_select],
56
+ outputs=[description, chat_history],
57
+ scroll_to_output=True,
58
+ )
59
+ message.submit(
60
+ fn=send_request,
61
+ inputs=[code, previous_code, message, chat_history, chat, model_select],
62
+ outputs=[chat_history, chat, message, previous_code],
63
  )
64
+ end_btn.click(fn=end_interview, inputs=[chat_history, model_select], outputs=feedback)
65
 
66
  demo.launch()
llm.py CHANGED
@@ -1,74 +1,61 @@
1
  import json
2
- import os
3
 
 
4
  from openai import OpenAI
5
 
6
- try:
7
- with open(".env") as file:
8
- for line in file:
9
- key, value = line.strip().split("=", 1)
10
- os.environ[key] = value
11
- except FileNotFoundError:
12
- pass
13
 
 
14
  client = OpenAI()
15
 
16
 
17
  def init_bot(problem=""):
18
- prompt_system = (
19
- "You are ChatGPT acting as a coding round interviewer for a big-tech company. "
20
- "You are very strict. You don't give any hints until candidate is stuck or asks for it. "
21
- "If a candidate made a mistake let them find and debug it themselves. "
22
- "If a solution can be improved let candidate figure it out, you can ask directional questions but delay giving hints. "
23
- "For each version of solution ask candidate about time and space complexity. "
24
- "Strive to get the most optimal solution possible. "
25
- "Always return the answer in json format with 2 fields: reply_to_candidate and hidden_note. "
26
- "reply_to_candidate: the answer that will be shown to the candidate. "
27
- "hidden_note: the concise hidden note that is not visible to the candidate but will be useful for final grading and feedback, "
28
- "it can contain short code snippets, errors found, things to pay attention to. "
29
- "'reply_to_candidate' can not be empty, 'hidden_note' can be empty if there is no new important information to note. "
30
- "When the interview is finished and you don't have any more questions provide a very detailed feedback. "
31
- "Don't wait for the candidate to ask for feedback, provide it as soon as you don't have any more question or if you see that the candidate can't solve the problem at all. "
32
- "Provide detailed feedback using all the notes, mentioning not only the final solution but all issues and mistakes made during the interview. "
33
- )
34
-
35
  chat_history = [
36
- {"role": "system", "content": prompt_system},
37
  {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
38
  ]
39
-
40
  return chat_history
41
 
42
 
43
- def get_problem(requirements="", client=client):
44
  prompt_system = "You are ChatGPT acting as a coding round interviewer for a big-tech company. "
45
- prompt_start = "Generate a coding problem that is expected to be solvable within 30 minutes. " "Follow the additional instructions: "
46
- prompt_end = (
47
- "Please provide the problem statement, example inputs and outputs, and any special constraints."
48
- "Return the results in nicely formatted markdown."
49
- )
50
- full_prompt = f"{prompt_start} {requirements} {prompt_end}"
51
-
52
  response = client.chat.completions.create(
53
- model="gpt-3.5-turbo",
54
  messages=[
55
  {"role": "system", "content": prompt_system},
56
  {"role": "user", "content": full_prompt},
57
  ],
58
  )
59
-
60
  question = response.choices[0].message.content.strip()
61
  chat_history = init_bot(question)
62
-
63
  return question, chat_history
64
 
65
 
66
- def send_request(code, previous_code, message, chat_history, chat_display, client=client):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  if code != previous_code:
68
  chat_history.append({"role": "user", "content": f"My latest code: {code}"})
69
  chat_history.append({"role": "user", "content": message})
70
 
71
- response = client.chat.completions.create(model="gpt-3.5-turbo", response_format={"type": "json_object"}, messages=chat_history)
72
 
73
  json_reply = response.choices[0].message.content.strip()
74
 
@@ -77,46 +64,10 @@ def send_request(code, previous_code, message, chat_history, chat_display, clien
77
  reply = data["reply_to_candidate"]
78
  except json.JSONDecodeError as e:
79
  print("Failed to decode JSON:", str(e))
 
80
 
81
  chat_history.append({"role": "assistant", "content": json_reply})
82
 
83
  chat_display.append([message, str(reply)])
84
 
85
  return chat_history, chat_display, "", code
86
-
87
-
88
- def end_interview(chat_history, client=client):
89
- prompt_system = (
90
- "You are ChatGPT acting as a grader of the coding round interviewer for a big-tech company. "
91
- "Below you will see the transcript of interview with and candidate."
92
- "Candidate will send you his current code with every message, you can ignore it if it didn't change. "
93
- "Provide very detailed feedback using all the notes and full interview transcript. "
94
- "Take into account all issues and mistakes made during the interview. "
95
- "Provide as many details as possible including: overall feedback, all mistakes, improvement opportunities, "
96
- "communication issues, missed edge cases, and any other valuable feedback. "
97
- "Use examples and code snippets when necessary. "
98
- "If the candidate didn't provide a solution or it was not optimal provide the correct most optimal one. "
99
- "Return the results in nicely formatted markdown."
100
- )
101
-
102
- transcript = []
103
- for message in chat_history[1:]:
104
- if message["role"] == "assistant":
105
- transcript.append(f"Interviewer: {message['content']}")
106
- elif message["role"] == "user":
107
- transcript.append(f"Candidate: {message['content']}")
108
- else:
109
- transcript.append(f"{message['role']}: {message['content']}")
110
-
111
- response = client.chat.completions.create(
112
- model="gpt-3.5-turbo",
113
- messages=[
114
- {"role": "system", "content": prompt_system},
115
- {"role": "user", "content": "Interview transcript:" + "\n\n".join(transcript)},
116
- {"role": "user", "content": "Grade the interview based on the transcript provided and give a feedback."},
117
- ],
118
- )
119
-
120
- feedback = response.choices[0].message.content.strip()
121
-
122
- return feedback
 
1
  import json
 
2
 
3
+ from dotenv import load_dotenv
4
  from openai import OpenAI
5
 
6
+ from prompts import coding_interviewer_prompt, grading_feedback_prompt
 
 
 
 
 
 
7
 
8
+ load_dotenv()
9
  client = OpenAI()
10
 
11
 
12
  def init_bot(problem=""):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  chat_history = [
14
+ {"role": "system", "content": coding_interviewer_prompt},
15
  {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
16
  ]
 
17
  return chat_history
18
 
19
 
20
+ def get_problem(requirements, difficulty, topic, model, client=client):
21
  prompt_system = "You are ChatGPT acting as a coding round interviewer for a big-tech company. "
22
+ full_prompt = f"Generate a {difficulty} {topic} problem in. Follow additional requirements: {requirements}. The problem should be solvable within 30 minutes."
 
 
 
 
 
 
23
  response = client.chat.completions.create(
24
+ model=model,
25
  messages=[
26
  {"role": "system", "content": prompt_system},
27
  {"role": "user", "content": full_prompt},
28
  ],
29
  )
 
30
  question = response.choices[0].message.content.strip()
31
  chat_history = init_bot(question)
 
32
  return question, chat_history
33
 
34
 
35
+ def end_interview(chat_history, model, client=client):
36
+ transcript = []
37
+ for message in chat_history[1:]:
38
+ role = message["role"]
39
+ content = f"{role.capitalize()}: {message['content']}"
40
+ transcript.append(content)
41
+ response = client.chat.completions.create(
42
+ model=model,
43
+ messages=[
44
+ {"role": "system", "content": grading_feedback_prompt},
45
+ {"role": "user", "content": "Interview transcript:" + "\n\n".join(transcript)},
46
+ {"role": "user", "content": "Grade the interview based on the transcript provided and give a feedback."},
47
+ ],
48
+ )
49
+ feedback = response.choices[0].message.content.strip()
50
+ return feedback
51
+
52
+
53
+ def send_request(code, previous_code, message, chat_history, chat_display, model, client=client):
54
  if code != previous_code:
55
  chat_history.append({"role": "user", "content": f"My latest code: {code}"})
56
  chat_history.append({"role": "user", "content": message})
57
 
58
+ response = client.chat.completions.create(model=model, response_format={"type": "json_object"}, messages=chat_history)
59
 
60
  json_reply = response.choices[0].message.content.strip()
61
 
 
64
  reply = data["reply_to_candidate"]
65
  except json.JSONDecodeError as e:
66
  print("Failed to decode JSON:", str(e))
67
+ reply = "There was an error processing your request."
68
 
69
  chat_history.append({"role": "assistant", "content": json_reply})
70
 
71
  chat_display.append([message, str(reply)])
72
 
73
  return chat_history, chat_display, "", code
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
prompts.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Coding round interviewer instructions
2
+ coding_interviewer_prompt = (
3
+ "As an AI acting as a coding interviewer for a major tech company, you are to maintain a strict demeanor. "
4
+ "Provide hints only when the candidate is noticeably stuck or explicitly requests assistance. "
5
+ "Allow candidates to identify and resolve errors independently. "
6
+ "Encourage the candidate to explore improved solutions through probing questions, delaying hints as needed. "
7
+ "Discuss the time and space complexity after each solution iteration, aiming for optimal outcomes. "
8
+ "Responses should be in JSON format with two fields: "
9
+ "1. 'reply_to_candidate': visible feedback to the candidate. "
10
+ "2. 'hidden_note': internal notes useful for grading, possibly including code snippets, identified errors, and key observations. "
11
+ "The 'hidden_note' may be omitted if there are no new critical insights. "
12
+ )
13
+
14
+ # Prompt for grading feedback
15
+ grading_feedback_prompt = (
16
+ "You are the AI grader for a coding interview at a major tech firm. "
17
+ "The following is the interview transcript with the candidate. "
18
+ "Evaluate the transcript. "
19
+ "Provide comprehensive feedback, incorporating all interview notes. "
20
+ "Detail overall performance, specific errors, areas for improvement, communication lapses, overlooked edge cases, and any other relevant observations. "
21
+ "Use code examples to illustrate points where necessary. "
22
+ "If the candidate’s solution was suboptimal or absent, suggest a more optimal solution. "
23
+ "Format all feedback in clear, structured markdown for readability."
24
+ )
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
  gradio==4.26.0
2
  openai==1.19.0
 
 
1
  gradio==4.26.0
2
  openai==1.19.0
3
+ python-dotenv==1.0.1