Letsch22 committed
Commit 25b94e8
1 Parent(s): 873cd5e

Rename methods for clarity

Files changed (1)
app.py +8 -8
app.py CHANGED
@@ -16,14 +16,10 @@ class MockInterviewer:
         self._assistant_id_cache: Dict[str, str] = {}
         self.clear_thread()
 
-    def interface_chat(self, message: Dict, history: List[List], job_role: str, company: str) -> Generator:
+    def chat_with_text(self, message: Dict, history: List[List], job_role: str, company: str) -> Generator:
         yield self._chat(message.get('text'), job_role, company)
 
-    def clear_thread(self) -> None:
-        print('Initializing new thread')
-        self._thread = self._client.beta.threads.create()
-
-    def transcript(self, audio: str, job_role: str, company: str) -> str:
+    def chat_with_audio(self, audio: str, job_role: str, company: str) -> str:
         with open(audio, 'rb') as audio_file:
             transcriptions = self._client.audio.transcriptions.create(
                 model='whisper-1',
@@ -33,6 +29,10 @@ class MockInterviewer:
         response = self._chat(transcriptions.text, job_role, company)
         return [(transcriptions.text, response)]
 
+    def clear_thread(self) -> None:
+        print('Initializing new thread')
+        self._thread = self._client.beta.threads.create()
+
     def _chat(self, message: str, job_role: str, company: str) -> str:
         print('Started chat')
         self._validate_fields(job_role, company)
@@ -117,7 +117,7 @@ with gr.Blocks() as demo:
     company = gr.Textbox(label='Company', placeholder='Amazon')
 
     chat_interface = gr.ChatInterface(
-        fn=mock_interviewer.interface_chat,
+        fn=mock_interviewer.chat_with_text,
         additional_inputs=[job_role, company],
         title='I am your AI mock interviewer',
         description='Make your selections above to configure me.',
@@ -129,7 +129,7 @@ with gr.Blocks() as demo:
     chat_interface.clear_btn.click(mock_interviewer.clear_thread)
 
     audio = gr.Audio(sources=['microphone'], type='filepath', editable=False)
-    audio.stop_recording(fn=mock_interviewer.transcript,
+    audio.stop_recording(fn=mock_interviewer.chat_with_audio,
                          inputs=[audio, job_role, company],
                          outputs=[chat_interface.chatbot],
                          api_name=False)
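
For context, the sketch below is a minimal, self-contained version of the Gradio wiring the renamed methods plug into. Only the method names (chat_with_text, chat_with_audio, clear_thread) and the Blocks/ChatInterface/Audio wiring come from the diff above; the MockInterviewer bodies are stand-in stubs (the real app calls the OpenAI Assistants and Whisper APIs), and the Job Role textbox label, multimodal=True, and demo.launch() are assumptions, not shown in this commit.

# Minimal sketch, not the Space's full app.py: stub interviewer plus the
# wiring pattern from the diff. Assumed pieces are marked in comments.
import gradio as gr

class MockInterviewer:
    def chat_with_text(self, message, history, job_role, company):
        # Real app: forwards message['text'] to the assistant thread via _chat.
        yield f"({job_role} at {company}) You said: {message.get('text', '')}"

    def chat_with_audio(self, audio, job_role, company):
        # Real app: transcribes the recording with Whisper, then reuses _chat.
        return [(f"[audio: {audio}]", f"Mock reply for a {job_role} role at {company}")]

    def clear_thread(self):
        # Real app: creates a fresh Assistants thread here.
        print('Initializing new thread')

mock_interviewer = MockInterviewer()

with gr.Blocks() as demo:
    job_role = gr.Textbox(label='Job Role', placeholder='Software Engineer')  # assumed label/placeholder
    company = gr.Textbox(label='Company', placeholder='Amazon')

    chat_interface = gr.ChatInterface(
        fn=mock_interviewer.chat_with_text,
        additional_inputs=[job_role, company],
        multimodal=True,  # assumption: dict messages with a 'text' key, as the signature suggests
        title='I am your AI mock interviewer',
        description='Make your selections above to configure me.')
    chat_interface.clear_btn.click(mock_interviewer.clear_thread)

    audio = gr.Audio(sources=['microphone'], type='filepath', editable=False)
    audio.stop_recording(fn=mock_interviewer.chat_with_audio,
                         inputs=[audio, job_role, company],
                         outputs=[chat_interface.chatbot],
                         api_name=False)

demo.launch()  # assumed entry point, not part of this diff

Note the design split the rename makes explicit: both entry points end up in the same private _chat path, one fed by typed text and one by a Whisper transcription, while clear_thread resets the conversation when the chat is cleared.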