acrep committed
Commit 1528267
Parent: 9850e12

Update app.py

Files changed (1)
app.py +45 -8
app.py CHANGED
@@ -95,22 +95,59 @@ class MockInterviewer:
     def _create_cache_key(self, job_role: str, company: str) -> str:
         return f'{job_role.lower()}+{company.lower()}'
 
+    def transcribe_and_chat(self, audio_file, job_role: str, company: str):
+        # Transcribe audio
+        transcript = self.transcribe_audio(audio_file)
+        # Now proceed with chat using the transcript
+        # Assuming chat method can handle initial user message as text
+        response = next(self.chat({'text': transcript}, [], job_role, company))
+        return response
+
+    def transcribe_audio(self, audio_file_path):
+        # Read the audio file
+        with open(audio_file_path, "rb") as audio_file:
+            audio_data = audio_file.read()
+
+        # Use OpenAI's API to transcribe the audio
+        response = openai.Speech.create(
+            audio=audio_data,
+            model="whisper-large"  # or whichever model is preferred
+        )
+
+        # Extract and return the transcription text
+        transcript = response['data']['text']
+        return transcript
+
 # Creating the Gradio interface
 with gr.Blocks() as demo:
     mock_interviewer = MockInterviewer()
 
+    with gr.Row():
+        job_role = gr.Textbox(label='Job Role', placeholder='Product Manager')
+        company = gr.Textbox(label='Company', placeholder='Amazon')
+        usr_audio = gr.Audio(source="microphone", type="filepath", label="Record or Upload Audio")
+
+    submit_btn = gr.Button("Submit")
+
+    response_output = gr.Textbox(label="Interviewer Response")
+
+    # Use transcribe_and_chat when audio is provided
+    submit_btn.click(
+        fn=mock_interviewer.transcribe_and_chat,
+        inputs=[usr_audio, job_role, company],
+        outputs=response_output
+    )
+
     chat_interface = gr.ChatInterface(
-        mock_interviewer.chat,
-        additional_inputs=[
-            gr.Textbox(label='Job Role', placeholder='Product Manager'),
-            gr.Textbox(label='Company', placeholder='Amazon')
-        ],
-        title='I am your AI mock interviewer',
+        fn=lambda usr_message, history, job_role, company: mock_interviewer.chat(usr_message, history, job_role, company),
+        additional_inputs=[job_role, company],
+        title='I am your AI mock interviewer',
         description='Make your selections above to configure me.',
         multimodal=True,
         retry_btn=None,
-        undo_btn=None).queue()
-
+        undo_btn=None
+    ).queue()
+
     chat_interface.load(mock_interviewer.clear_thread)
     chat_interface.clear_btn.click(mock_interviewer.clear_thread)
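
Note on the new transcribe_audio method: openai.Speech.create is not an API in the OpenAI Python SDK, and its transcription responses carry no ['data']['text'] field, so this method will fail at runtime as committed. A minimal working sketch against the pre-1.0 SDK (assuming openai<1.0 is installed, the hosted Whisper model name "whisper-1", and an API key in the OPENAI_API_KEY environment variable):

    import openai  # pre-1.0 SDK; picks up OPENAI_API_KEY from the environment

    def transcribe_audio(audio_file_path: str) -> str:
        # The Whisper endpoint takes an open file handle, not raw bytes
        with open(audio_file_path, "rb") as audio_file:
            response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
        # The transcribed text sits at the top level of the response
        return response["text"]

On the 1.x SDK the equivalent call is OpenAI().audio.transcriptions.create(model="whisper-1", file=audio_file).text.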
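
Two smaller caveats in this diff. transcribe_and_chat returns next(self.chat(...)), which yields only the first chunk if chat streams its reply. A sketch that keeps the final, complete yield instead, assuming chat yields cumulative partial replies as Gradio streaming handlers typically do:

    def transcribe_and_chat(self, audio_file, job_role: str, company: str):
        transcript = self.transcribe_audio(audio_file)
        # Exhaust the generator and keep the last yield, which holds the full reply
        response = None
        for response in self.chat({'text': transcript}, [], job_role, company):
            pass
        return response

And gr.Audio(source="microphone", ...) uses the Gradio 3.x parameter name; on Gradio 4.x the component takes sources=["microphone"] instead.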