IliaLarchenko committed on
Commit
72abfd9
1 Parent(s): 2f4f2ae

Created models config

Browse files
Files changed (4) hide show
  1. app.py +9 -18
  2. config.py +18 -0
  3. llm.py +18 -14
  4. options.py +0 -4
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
 
3
- from llm import end_interview, get_problem, read_last_message, send_request, transcribe_audio
4
- from options import fixed_messages, models, topics_list
5
 
6
  default_audio_params = {
7
  "label": "Record answer",
@@ -41,8 +41,8 @@ def hide_solution():
41
 
42
  with gr.Blocks() as demo:
43
  gr.Markdown("Your coding interview practice AI assistant!")
44
- # TODO: add other types of interviews (e.g. system design, ML design, behavioral, etc.)
45
-
46
  with gr.Tab("Coding") as coding_tab:
47
  chat_history = gr.State([])
48
  previous_code = gr.State("")
@@ -66,16 +66,10 @@ with gr.Blocks() as demo:
66
  topic_select = gr.Dropdown(
67
  label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
68
  )
69
-
70
- gr.Markdown("##### Assistant settings")
71
- with gr.Row():
72
- gr.Markdown("Select LLM model to use")
73
- model_select = gr.Dropdown(label="Select model", choices=models, value="gpt-3.5-turbo", container=False)
74
  with gr.Column(scale=2):
75
  requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
76
  start_btn = gr.Button("Generate a problem")
77
 
78
- # TODO: select LLM model
79
  with gr.Accordion("Problem statement", open=True) as problem_acc:
80
  description = gr.Markdown()
81
  with gr.Accordion("Solution", open=False) as solution_acc:
@@ -94,21 +88,18 @@ with gr.Blocks() as demo:
94
  with gr.Accordion("Feedback", open=True) as feedback_acc:
95
  feedback = gr.Markdown()
96
 
97
- with gr.Tab("Instruction") as instruction_tab:
98
- pass
99
-
100
  coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat], outputs=[chat])
101
 
102
  start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).then(
103
  fn=get_problem,
104
- inputs=[requirements, difficulty_select, topic_select, model_select],
105
  outputs=[description, chat_history],
106
  scroll_to_output=True,
107
  ).then(fn=hide_settings, inputs=None, outputs=[init_acc, start_btn, solution_acc, end_btn, audio_input])
108
 
109
  message.submit(
110
  fn=send_request,
111
- inputs=[code, previous_code, message, chat_history, chat, model_select],
112
  outputs=[chat_history, chat, message, previous_code],
113
  )
114
 
@@ -117,14 +108,14 @@ with gr.Blocks() as demo:
117
  inputs=[chat],
118
  outputs=[chat],
119
  ).then(
120
- fn=end_interview, inputs=[description, chat_history, model_select], outputs=feedback
121
  ).then(fn=hide_solution, inputs=None, outputs=[solution_acc, end_btn, problem_acc, audio_input])
122
 
123
- audio_input.stop_recording(fn=transcribe_audio, inputs=[audio_input], outputs=[message]).then(
124
  fn=lambda: None, inputs=None, outputs=[audio_input]
125
  ).then(
126
  fn=send_request,
127
- inputs=[code, previous_code, message, chat_history, chat, model_select],
128
  outputs=[chat_history, chat, message, previous_code],
129
  )
130
 
 
1
  import gradio as gr
2
 
3
+ from llm import end_interview, get_problem, read_last_message, send_request, speech_to_text
4
+ from options import fixed_messages, topics_list
5
 
6
  default_audio_params = {
7
  "label": "Record answer",
 
41
 
42
  with gr.Blocks() as demo:
43
  gr.Markdown("Your coding interview practice AI assistant!")
44
+ with gr.Tab("Instruction") as instruction_tab:
45
+ pass
46
  with gr.Tab("Coding") as coding_tab:
47
  chat_history = gr.State([])
48
  previous_code = gr.State("")
 
66
  topic_select = gr.Dropdown(
67
  label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
68
  )
 
 
 
 
 
69
  with gr.Column(scale=2):
70
  requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
71
  start_btn = gr.Button("Generate a problem")
72
 
 
73
  with gr.Accordion("Problem statement", open=True) as problem_acc:
74
  description = gr.Markdown()
75
  with gr.Accordion("Solution", open=False) as solution_acc:
 
88
  with gr.Accordion("Feedback", open=True) as feedback_acc:
89
  feedback = gr.Markdown()
90
 
 
 
 
91
  coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat], outputs=[chat])
92
 
93
  start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).then(
94
  fn=get_problem,
95
+ inputs=[requirements, difficulty_select, topic_select],
96
  outputs=[description, chat_history],
97
  scroll_to_output=True,
98
  ).then(fn=hide_settings, inputs=None, outputs=[init_acc, start_btn, solution_acc, end_btn, audio_input])
99
 
100
  message.submit(
101
  fn=send_request,
102
+ inputs=[code, previous_code, message, chat_history, chat],
103
  outputs=[chat_history, chat, message, previous_code],
104
  )
105
 
 
108
  inputs=[chat],
109
  outputs=[chat],
110
  ).then(
111
+ fn=end_interview, inputs=[description, chat_history], outputs=feedback
112
  ).then(fn=hide_solution, inputs=None, outputs=[solution_acc, end_btn, problem_acc, audio_input])
113
 
114
+ audio_input.stop_recording(fn=speech_to_text, inputs=[audio_input], outputs=[message]).then(
115
  fn=lambda: None, inputs=None, outputs=[audio_input]
116
  ).then(
117
  fn=send_request,
118
+ inputs=[code, previous_code, message, chat_history, chat],
119
  outputs=[chat_history, chat, message, previous_code],
120
  )
121
 
config.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LLM_URL = f"https://api.openai.com/v1"
2
+ LLM_KEY_TYPE = "OPENAI_API_KEY" # there should be an environment variable with this name
3
+ LLM_NAME = "gpt-3.5-turbo"
4
+ # "gpt-3.5-turbo" - ~3 seconds delay with decent quality
5
+ # "gpt-4-turbo", "gpt-4", etc. - 10+ seconds delay but higher quality
6
+
7
+ STT_URL = f"https://api.openai.com/v1"
8
+ STT_KEY_TYPE = "OPENAI_API_KEY" # there should be an environment variable with this name
9
+ STT_NAME = "whisper-1"
10
+ # "whisper-1" - the only OpenAI STT model available
11
+
12
+
13
+ TTS_URL = f"https://api.openai.com/v1"
14
+ TTS_KEY_TYPE = "OPENAI_API_KEY" # there should be an environment variable with this name
15
+ TTS_NAME = "tts-1"
16
+ # Recommended options
17
+ # "tts-1" - good quality and close to real-time response. Just use this one
18
+ # "tts-1-hd" - slightly better quality with slightly longer response time
llm.py CHANGED
@@ -1,14 +1,19 @@
1
  import json
 
2
 
3
  from dotenv import load_dotenv
4
  from openai import OpenAI
5
 
6
  from audio import numpy_audio_to_bytes
 
7
  from prompts import coding_interviewer_prompt, grading_feedback_prompt, problem_generation_prompt
8
 
9
  load_dotenv()
10
- # TODO: don't use my key
11
- client = OpenAI()
 
 
 
12
 
13
 
14
  def init_bot(problem=""):
@@ -19,7 +24,7 @@ def init_bot(problem=""):
19
  return chat_history
20
 
21
 
22
- def get_problem(requirements, difficulty, topic, model, client=client):
23
  full_prompt = (
24
  f"Create a {difficulty} {topic} coding problem. "
25
  f"Additional requirements: {requirements}. "
@@ -27,7 +32,7 @@ def get_problem(requirements, difficulty, topic, model, client=client):
27
  "Ensure the problem varies each time to provide a wide range of challenges."
28
  )
29
  response = client.chat.completions.create(
30
- model=model,
31
  messages=[
32
  {"role": "system", "content": problem_generation_prompt},
33
  {"role": "user", "content": full_prompt},
@@ -39,7 +44,7 @@ def get_problem(requirements, difficulty, topic, model, client=client):
39
  return question, chat_history
40
 
41
 
42
- def end_interview(problem_description, chat_history, model, client=client):
43
  if not chat_history or len(chat_history) <= 2:
44
  return "No interview content available to review."
45
 
@@ -50,7 +55,7 @@ def end_interview(problem_description, chat_history, model, client=client):
50
  transcript.append(content)
51
 
52
  response = client.chat.completions.create(
53
- model=model,
54
  messages=[
55
  {"role": "system", "content": grading_feedback_prompt},
56
  {"role": "user", "content": f"The original problem to solve: {problem_description}"},
@@ -63,12 +68,12 @@ def end_interview(problem_description, chat_history, model, client=client):
63
  return feedback
64
 
65
 
66
- def send_request(code, previous_code, message, chat_history, chat_display, model, client=client):
67
  if code != previous_code:
68
- chat_history.append({"role": "user", "content": f"My latest code: {code}"})
69
  chat_history.append({"role": "user", "content": message})
70
 
71
- response = client.chat.completions.create(model=model, response_format={"type": "json_object"}, messages=chat_history)
72
 
73
  json_reply = response.choices[0].message.content.strip()
74
 
@@ -85,16 +90,15 @@ def send_request(code, previous_code, message, chat_history, chat_display, model
85
  return chat_history, chat_display, "", code
86
 
87
 
88
- def transcribe_audio(audio, client=client):
89
  transcription = client.audio.transcriptions.create(
90
- model="whisper-1", file=("temp.wav", numpy_audio_to_bytes(audio[1]), "audio/wav"), response_format="text"
91
  )
92
-
93
  return transcription
94
 
95
 
96
- def text_to_speech(text, client=client):
97
- response = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
98
  return response.content
99
 
100
 
 
1
  import json
2
+ import os
3
 
4
  from dotenv import load_dotenv
5
  from openai import OpenAI
6
 
7
  from audio import numpy_audio_to_bytes
8
+ from config import LLM_KEY_TYPE, LLM_NAME, LLM_URL, STT_KEY_TYPE, STT_NAME, STT_URL, TTS_KEY_TYPE, TTS_NAME, TTS_URL
9
  from prompts import coding_interviewer_prompt, grading_feedback_prompt, problem_generation_prompt
10
 
11
  load_dotenv()
12
+
13
+ client_LLM = OpenAI(base_url=LLM_URL, api_key=os.getenv(LLM_KEY_TYPE))
14
+ print(client_LLM.base_url)
15
+ client_STT = OpenAI(base_url=STT_URL, api_key=os.getenv(STT_KEY_TYPE))
16
+ client_TTS = OpenAI(base_url=TTS_URL, api_key=os.getenv(TTS_KEY_TYPE))
17
 
18
 
19
  def init_bot(problem=""):
 
24
  return chat_history
25
 
26
 
27
+ def get_problem(requirements, difficulty, topic, client=client_LLM):
28
  full_prompt = (
29
  f"Create a {difficulty} {topic} coding problem. "
30
  f"Additional requirements: {requirements}. "
 
32
  "Ensure the problem varies each time to provide a wide range of challenges."
33
  )
34
  response = client.chat.completions.create(
35
+ model=LLM_NAME,
36
  messages=[
37
  {"role": "system", "content": problem_generation_prompt},
38
  {"role": "user", "content": full_prompt},
 
44
  return question, chat_history
45
 
46
 
47
+ def end_interview(problem_description, chat_history, client=client_LLM):
48
  if not chat_history or len(chat_history) <= 2:
49
  return "No interview content available to review."
50
 
 
55
  transcript.append(content)
56
 
57
  response = client.chat.completions.create(
58
+ model=LLM_NAME,
59
  messages=[
60
  {"role": "system", "content": grading_feedback_prompt},
61
  {"role": "user", "content": f"The original problem to solve: {problem_description}"},
 
68
  return feedback
69
 
70
 
71
+ def send_request(code, previous_code, message, chat_history, chat_display, client=client_LLM):
72
  if code != previous_code:
73
+ chat_history.append({"role": "user", "content": f"My latest code:\n{code}"})
74
  chat_history.append({"role": "user", "content": message})
75
 
76
+ response = client.chat.completions.create(model=LLM_NAME, response_format={"type": "json_object"}, messages=chat_history)
77
 
78
  json_reply = response.choices[0].message.content.strip()
79
 
 
90
  return chat_history, chat_display, "", code
91
 
92
 
93
+ def speech_to_text(audio, client=client_STT):
94
  transcription = client.audio.transcriptions.create(
95
+ model=STT_NAME, file=("temp.wav", numpy_audio_to_bytes(audio[1]), "audio/wav"), response_format="text"
96
  )
 
97
  return transcription
98
 
99
 
100
+ def text_to_speech(text, client=client_TTS):
101
+ response = client.audio.speech.create(model=TTS_NAME, voice="alloy", input=text)
102
  return response.content
103
 
104
 
options.py CHANGED
@@ -20,10 +20,6 @@ topics_list = [
20
  "Binary Search Trees",
21
  "Tries",
22
  ]
23
- models = ["gpt-3.5-turbo"]
24
- # TODO: add more OAI models
25
- # TODO: add more OS models
26
-
27
 
28
  fixed_messages = {
29
  "intro": "Welcome to the coding interview! I am your AI interview assistant. For the start select the difficulty and topic of the problem you would like to solve. Then click on the 'Generate a problem' button. Good luck!",
 
20
  "Binary Search Trees",
21
  "Tries",
22
  ]
 
 
 
 
23
 
24
  fixed_messages = {
25
  "intro": "Welcome to the coding interview! I am your AI interview assistant. For the start select the difficulty and topic of the problem you would like to solve. Then click on the 'Generate a problem' button. Good luck!",