IliaLarchenko committed
Commit 6ba2996
1 Parent(s): c62e737

Added models status check

Files changed (2):
  1. app.py  +24 -1
  2. llm.py  +17 -4
app.py CHANGED

@@ -1,6 +1,6 @@
 import gradio as gr

-from llm import end_interview, get_problem, read_last_message, send_request, speech_to_text
+from llm import end_interview, get_problem, read_last_message, send_request, speech_to_text, test_connection, text_to_speech
 from options import fixed_messages, topics_list

 default_audio_params = {
@@ -42,6 +42,29 @@ def hide_solution():
 with gr.Blocks() as demo:
     gr.Markdown("Your coding interview practice AI assistant!")
     with gr.Tab("Instruction") as instruction_tab:
+        with gr.Row():
+            with gr.Column(scale=10):
+                gr.Markdown("### Instructions")
+
+                pass
+            with gr.Column(scale=1):
+                try:
+                    audio_test = text_to_speech("Handshake")
+                    gr.Markdown("TTS status: 🟒")
+                except:
+                    gr.Markdown("TTS status: πŸ”΄")
+                try:
+                    text_test = speech_to_text(audio_test, False)
+                    gr.Markdown("STT status: 🟒")
+                except:
+                    gr.Markdown("STT status: πŸ”΄")
+
+                try:
+                    test_connection()
+                    gr.Markdown("LLM status: 🟒")
+                except:
+                    gr.Markdown("LLM status: πŸ”΄")
+
         pass
     with gr.Tab("Coding") as coding_tab:
         chat_history = gr.State([])
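In short, the app.py change builds a small status column when the UI is constructed: it runs a TTS -> STT -> LLM "handshake" and renders a green or red marker per service. Below is a minimal sketch (not part of the commit) of the same check factored into a standalone helper; it assumes llm.py exposes text_to_speech, speech_to_text, and test_connection exactly as they are imported in the diff above (the body of text_to_speech is not shown in this commit).

# Sketch only: the Instruction-tab handshake as a plain helper.
from llm import speech_to_text, test_connection, text_to_speech


def model_status() -> dict:
    status = {}
    audio_test = None

    try:
        audio_test = text_to_speech("Handshake")
        status["TTS"] = "🟒"
    except Exception:
        status["TTS"] = "πŸ”΄"

    try:
        # convert_to_bytes=False: the TTS output is already encoded audio,
        # not the (sample_rate, numpy_array) tuple produced by a gr.Audio input
        speech_to_text(audio_test, False)
        status["STT"] = "🟒"
    except Exception:
        status["STT"] = "πŸ”΄"

    try:
        test_connection()
        status["LLM"] = "🟒"
    except Exception:
        status["LLM"] = "πŸ”΄"

    return status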
llm.py CHANGED

@@ -15,6 +15,16 @@ load_dotenv()
 client_LLM = OpenAI(base_url=LLM_URL, api_key=os.getenv(f"{LLM_TYPE}_KEY"))


+def test_connection():
+    response = client_LLM.chat.completions.create(
+        model=LLM_NAME,
+        messages=[
+            {"role": "system", "content": "Test connection"},
+        ],
+    )
+    return response.choices[0].message.content.strip()
+
+
 def init_bot(problem=""):
     chat_history = [
         {"role": "system", "content": coding_interviewer_prompt},
@@ -36,7 +46,7 @@ def get_problem(requirements, difficulty, topic, client=client_LLM):
             {"role": "system", "content": problem_generation_prompt},
             {"role": "user", "content": full_prompt},
         ],
-        temperature=1.0,  # Adjusted for a balance between creativity and coherency
+        temperature=1.0,
     )
     question = response.choices[0].message.content.strip()
     chat_history = init_bot(question)
@@ -82,16 +92,19 @@ def send_request(code, previous_code, message, chat_history, chat_display, clien
     return chat_history, chat_display, "", code


-def speech_to_text(audio):
+def speech_to_text(audio, convert_to_bytes=True):
     assert STT_TYPE in ["OPENAI_API", "HF_API"]

+    if convert_to_bytes:
+        audio = numpy_audio_to_bytes(audio[1])
+
     if STT_TYPE == "OPENAI_API":
-        data = ("temp.wav", numpy_audio_to_bytes(audio[1]), "audio/wav")
+        data = ("temp.wav", audio, "audio/wav")
         client = OpenAI(base_url=STT_URL, api_key=os.getenv(f"{STT_TYPE}_KEY"))
         transcription = client.audio.transcriptions.create(model=STT_NAME, file=data, response_format="text")
     elif STT_TYPE == "HF_API":
         headers = {"Authorization": "Bearer " + os.getenv(f"{STT_TYPE}_KEY")}
-        transcription = requests.post(STT_URL, headers=headers, data=numpy_audio_to_bytes(audio[1]))
+        transcription = requests.post(STT_URL, headers=headers, data=audio)
         transcription = transcription.json()["text"]

     return transcription
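The llm.py side adds test_connection(), which sends a single system message to the configured LLM and returns its reply, and gives speech_to_text a convert_to_bytes flag so the status check can pass already-encoded audio instead of a microphone tuple. A short sketch of the two call paths follows; it assumes a Gradio-style microphone value of (sample_rate, numpy_array) and that text_to_speech returns encoded audio bytes, which is implied by the convert_to_bytes=False call in app.py above but not shown in this diff.

# Sketch only: the two speech_to_text call paths after this commit.
import numpy as np

from llm import speech_to_text, text_to_speech

# 1) UI path (default convert_to_bytes=True): the (sample_rate, array) tuple
#    is converted to bytes inside speech_to_text via numpy_audio_to_bytes.
mic_value = (16000, np.zeros(16000, dtype=np.int16))  # one second of silence
print(speech_to_text(mic_value))

# 2) Status-check path: encoded audio bytes go straight through, no conversion.
audio_bytes = text_to_speech("Handshake")
print(speech_to_text(audio_bytes, convert_to_bytes=False))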