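"""Gradio app for AiMI, an AI mock interviewer.

Wraps the OpenAI Assistants API (beta): typed answers go straight to the
assistant, while webcam answers are transcribed with Whisper and sampled
video frames are uploaded so the assistant can give nonverbal feedback.
"""
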
import os
import random
from dataclasses import dataclass
from time import sleep
from typing import Dict, List, Generator, Optional

import cv2
import gradio as gr
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()


@dataclass(eq=True, frozen=True)
class Config:
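    """Immutable interview settings; frozen so instances are hashable cache keys."""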
    job_role: str
    company: str
    job_description: str
    behavioral_count: int
    technical_count: int
    situational_count: int
    case_count: int


class MockInterviewer:
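    """Runs mock interviews against the OpenAI Assistants API (beta).

    One assistant is created and cached per Config; every chat shares a
    single thread until clear_thread() starts a fresh one.
    """
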
    def __init__(self) -> None:
        self._client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
        self._assistant_id_cache: Dict[Config, str] = {}
        self.clear_thread()

    def chat_with_text(
        self,
        message: Dict,
        history: List[List],
        job_role: str,
        company: str,
        job_description: str,
        behavioral_count: int,
        technical_count: int,
        situational_count: int,
        case_count: int
    ) -> Generator:
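        """Gradio ChatInterface callback: forward a typed message to the assistant."""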
        config = Config(job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count)
        yield self._chat(message, config)

    def chat_with_video(
        self,
        video: str,
        job_role: str,
        company: str,
        job_description: str,
        behavioral_count: int,
        technical_count: int,
        situational_count: int,
        case_count: int
    ) -> List:
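        """Webcam callback: transcribe the recording, upload sampled frames, and chat.

        Returns a single (user_transcript, assistant_response) pair for the chatbot.
        """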
        # Transcribe the recorded answer with Whisper.
        with open(video, 'rb') as file:
            transcriptions = self._client.audio.transcriptions.create(
                model='whisper-1',
                file=file,
            )
        video_frame_file_ids = self._extract_frames(video)
        os.remove(video)  # Gradio hands us a temp file; clean it up once frames are extracted.
        config = Config(job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count)
        response = self._chat(transcriptions.text, config, video_frame_file_ids)
        return [(transcriptions.text, response)]

    def clear_thread(self) -> None:
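        """Start a fresh conversation thread, discarding prior interview state."""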
        print('Initializing new thread')
        self._thread = self._client.beta.threads.create()

    def _extract_frames(self, video_path: str) -> List[str]:
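        """Sample up to 10 random frames from the video and upload them as JPEGs.

        Returns the OpenAI file ids of the uploaded frames.
        """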
        video = cv2.VideoCapture(video_path)
        num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        counter = 0
        # Sample at most 10 frame indices (fewer for very short clips).
        for i in random.sample(range(num_frames), min(10, num_frames)):
            video.set(cv2.CAP_PROP_POS_FRAMES, i)  # Seek to the sampled frame.
            success, frame = video.read()
            if not success:
                print('Error in video frame extraction')
                break
            cv2.imwrite(f'{counter}.jpg', frame)
            counter += 1
        video.release()

        file_ids = []
        for i in range(counter):
            with open(f'{i}.jpg', 'rb') as image:
                file = self._client.files.create(file=image, purpose='assistants')
                file_ids.append(file.id)
            os.remove(f'{i}.jpg')
        return file_ids

    def _chat(self, message: str, config: Config, video_frame_file_ids: Optional[List[str]] = None) -> str:
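        """Ensure an assistant exists for this config, then relay the message to it."""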
        print('Started chat')
        assistant_id = self._init_assistant(config)
        return self._send_message(message, assistant_id, video_frame_file_ids or [])

    def _send_message(self, message: str, assistant_id: str, video_frame_file_ids: List[str]) -> str:
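        """Post a user message to the shared thread, run the assistant, and poll.

        Blocks until the run reaches a terminal state, then returns the newest
        assistant reply.
        """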
        self._client.beta.threads.messages.create(thread_id=self._thread.id, role='user', content=message, file_ids=video_frame_file_ids)
        print('Message created')
        run = self._client.beta.threads.runs.create(thread_id=self._thread.id, assistant_id=assistant_id)
        print('Run created')

        # Poll until the run reaches a terminal state; breaking on failure states
        # too avoids spinning forever if the run errors out.
        while True:
            run_status = self._client.beta.threads.runs.retrieve(thread_id=self._thread.id, run_id=run.id)
            print(f'Run status: {run_status.status}')
            if run_status.status in ('completed', 'failed', 'cancelled', 'expired'):
                break
            sleep(1)  # Wait a second before checking again.

        # Retrieve and return the latest message from the assistant.
        messages = self._client.beta.threads.messages.list(thread_id=self._thread.id)
        response = messages.data[0].content[0].text.value
        print(f'Assistant response: {response}')
        return response

    def _init_assistant(self, config: Config) -> str:
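        """Return the assistant id for this config, creating and caching one if needed."""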
        cache_key = config
        if cache_key in self._assistant_id_cache:
            print(f'Fetched from cache for key {cache_key}')
            return self._assistant_id_cache.get(cache_key)
        else:
            print(f'Initializing new assistant for key {cache_key}')
            assistant = self._client.beta.assistants.create(
                name='Mock Interviewer',
                instructions=self._generate_assistant_instructions(config),
                model='gpt-4-0125-preview')
            self._assistant_id_cache[cache_key] = assistant.id
            return assistant.id

    def _generate_assistant_instructions(self, config: Config) -> str:
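        """Build the assistant's system prompt from the job details and question counts."""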
        if config.job_role and config.company:
            purpose = f'You are AiMI, an AI mock interviewer for {config.job_role} roles at {config.company}.'
        elif config.job_role:
            purpose = f'You are AiMI, an AI mock interviewer for {config.job_role} roles.'
        elif config.company:
            purpose = f'You are AiMI, an AI mock interviewer for roles at {config.company}.'
        else:
            purpose = 'You are AiMI, an AI mock interviewer.'

        if config.job_description:
            specifics = f'Tailor your questions based on the following job posting: {config.job_description}.'
        else:
            specifics = ''
        return f'''
        {purpose} Please greet the candidate and begin the mock interview when the candidate sends you the first message. {specifics} Ask {config.behavioral_count} behavioral questions, {config.technical_count} technical questions, {config.situational_count} situational questions, and {config.case_count} case-style questions, one question at a time.

        After the candidate gives a response, evaluate it by addressing the candidate directly (i.e. speak to them as "you"). Keep in mind what your company values in candidates (if you have been assigned a company). Provide a detailed analysis of the candidate's response based on the question type. In your feedback, comment on 1) avoiding filler words and non-words such as "um" or "like", 2) avoiding jargon, and 3) flow (ideas progress logically with clear transitions between main points).

        The candidate may have included frames from a video recording of their response. If so, analyze the provided images from the mock interview setting and briefly recommend improvements, focusing on these nonverbal criteria. Facial expressions: assess whether they convey confidence, engagement, and professionalism, and how they could affect the interviewer's perception. Energy: whether the candidate appears energetic and engaging. Comment on the following only if it genuinely needs improvement: lighting (quality and direction, harsh shadows on the face, background illumination), apparel (colors, fit, and formality appropriate for a professional interview), and speaking environment/background (distractions or elements that pull focus from the candidate; suggest a more neutral, professional backdrop). Limit your complete comments on the candidate's video to 100 words.

        Finally, rate the complete response (content and video) on a scale from 1 to 10, where 1 is inadequate and 10 is exceptional.
        '''


mock_interviewer = MockInterviewer()
theme = gr.themes.Soft(
    primary_hue="purple",
    secondary_hue="fuchsia",
).set(
    body_background_fill='*neutral_100',
    body_background_fill_dark='*background_fill_secondary'
)
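
# Layout: job details and question sliders on the left, chat in the middle,
# webcam recorder on the right.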
with gr.Blocks(theme=theme) as demo:
    with gr.Row():
        with gr.Column(variant='panel', scale=1):
            logo = gr.Image('aimi_logo.png', interactive=False, show_label=False, show_download_button=False, show_share_button=False)
            with gr.Accordion("Job Information", open=False):
                job_role = gr.Textbox(label='Job Role', placeholder='Product Manager')
                company = gr.Textbox(label='Company', placeholder='Amazon')
                job_description = gr.TextArea(
                    label='Job Description',
                    placeholder='Key job responsibilities, basic qualifications, preferred qualifications, about the company, etc.'
                )
            with gr.Accordion("Question Preferences", open=False):
                behavioral_count = gr.Slider(label="Behavioral", maximum=10, value=1, step=1)
                technical_count = gr.Slider(label="Technical", maximum=10, value=1, step=1)
                situational_count = gr.Slider(label="Situational", maximum=10, value=1, step=1)
                case_count = gr.Slider(label="Case", maximum=10, value=1, step=1)

        with gr.Column(variant='panel', scale=6):
            chat_interface = gr.ChatInterface(
                fn=mock_interviewer.chat_with_text,
                additional_inputs=[job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count],
                retry_btn=None,
                undo_btn=None)
            chat_interface.chatbot.value = [(None, "Hi! I'm AiMI, your AI Mock Interview Assistant. Fill in the job details on the left and choose your question types. Ready? Allow webcam access, hit the red button to record, and use the chat if you need help. Let’s get started!")]
            chat_interface.chatbot.height = '70vh'
            chat_interface.load(mock_interviewer.clear_thread)
            chat_interface.clear_btn.click(mock_interviewer.clear_thread)

        with gr.Column(variant='panel', scale=1):
            video = gr.Video(sources='webcam', include_audio=True)
            # After each recording stops, send it through chat_with_video, then clear the player.
            video.stop_recording(
                fn=mock_interviewer.chat_with_video,
                inputs=[video, job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count],
                outputs=[chat_interface.chatbot],
                api_name=False).then(lambda: None, None, video, queue=False)


if __name__ == '__main__':
    demo.launch()