IliaLarchenko committed
Commit f6e34f2 · 1 Parent(s): c8e8be4

Added chatting with interviewer

Files changed (2)
  1. app.py +9 -1
  2. llm.py +22 -0
app.py CHANGED
```diff
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from llm import get_problem
+from llm import get_problem, send_request
 
 with gr.Blocks() as demo:
     gr.Markdown("Your coding interview practice AI assistant!")
@@ -15,8 +15,13 @@ with gr.Blocks() as demo:
             "Or keep it blank to just get a random question."
         ),
     )
+    # TODO: select language
+    # TODO: select difficulty
+    # TODO: select topic
+    # TODO: select LLM model
     start_btn = gr.Button("Start")
     with gr.Accordion("Solution", open=True) as solution_acc:
+        # TODO: auto open close
         with gr.Accordion("Problem description", open=True) as solution_acc:
             description = gr.Markdown()
         with gr.Row() as content:
@@ -31,6 +36,9 @@ with gr.Blocks() as demo:
         feedback = gr.Markdown()
 
     start_btn.click(fn=get_problem, inputs=requirements, outputs=[description, chat_history], scroll_to_output=True)
+    answer_btn.click(
+        fn=send_request, inputs=[code, previous_code, message, chat_history, chat], outputs=[chat_history, chat, message, previous_code]
+    )
 
 
 demo.launch()
```
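The new `answer_btn.click` handler wires up the chat round trip: it passes the candidate's current code, the previously submitted code, the typed message, the raw `chat_history` (an OpenAI-style message list), and the visible `chat` widget to `send_request`, then writes the updated values back. The components it references (`answer_btn`, `code`, `previous_code`, `message`, `chat`) are defined in parts of `app.py` outside these hunks; a minimal sketch of how they could be declared, with assumed labels and widget types, is shown below.

```python
# Sketch only: assumed declarations for the components referenced by answer_btn.click.
# The real definitions live in parts of app.py not shown in this diff.
with gr.Row() as content:
    code = gr.Code(label="Solution", language="python")       # candidate's code editor
    with gr.Column():
        chat = gr.Chatbot(label="Chat with the interviewer")   # visible [message, reply] pairs
        message = gr.Textbox(label="Message to the interviewer")
        answer_btn = gr.Button("Send")

chat_history = gr.State([])   # raw OpenAI-style message list passed to the LLM
previous_code = gr.State("")  # last submitted code; send_request compares against it
```

Keeping `chat_history` and `previous_code` in `gr.State` means each click can compare the new code against the last submission and only re-send it to the model when it actually changed.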
llm.py CHANGED
```diff
@@ -61,3 +61,25 @@ def get_problem(requirements="", client=client):
     chat_history = init_bot(question)
 
     return question, chat_history
+
+
+def send_request(code, previous_code, message, chat_history, chat_display, client=client):
+    if code != previous_code:
+        chat_history.append({"role": "user", "content": f"My latest code: {code}"})
+    chat_history.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(model="gpt-3.5-turbo", response_format={"type": "json_object"}, messages=chat_history)
+
+    json_reply = response.choices[0].message.content.strip()
+
+    try:
+        data = json.loads(json_reply)
+        reply = data["reply_to_candidate"]
+    except json.JSONDecodeError as e:
+        print("Failed to decode JSON:", str(e))
+
+    chat_history.append({"role": "assistant", "content": json_reply})
+
+    chat_display.append([message, str(reply)])
+
+    return chat_history, chat_display, "", code
```
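`send_request` assumes the conversation was primed by `init_bot` (earlier in `llm.py`) with a prompt that makes the model answer as a JSON object containing a `reply_to_candidate` field; `response_format={"type": "json_object"}` only guarantees syntactically valid JSON, not that particular key. Note that `reply` is bound only inside the `try` block, so a decode failure would surface as a `NameError` a few lines later. A minimal sketch of a more defensive parsing step, under the same assumed reply format:

```python
import json


def parse_interviewer_reply(json_reply: str) -> str:
    """Return the text addressed to the candidate, falling back to the raw payload."""
    try:
        data = json.loads(json_reply)
        # "reply_to_candidate" is the key the system prompt is assumed to request.
        return str(data.get("reply_to_candidate", json_reply))
    except json.JSONDecodeError as e:
        print("Failed to decode JSON:", str(e))
        return json_reply  # show the raw model output instead of crashing
```

Used in place of the inline `try`/`except`, this keeps `chat_display.append([message, str(reply)])` valid even when the model returns malformed or unexpectedly shaped JSON.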