import os
import urllib.request
from dataclasses import dataclass
from time import sleep
from typing import Dict, Generator, List

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
load_dotenv()


@dataclass(eq=True, frozen=True)
class Config:
    """Interview settings; frozen so instances are hashable and usable as cache keys."""
    job_role: str
    company: str
    job_description: str
    behavioral_count: int
    technical_count: int
    situational_count: int
    case_count: int


class MockInterviewer:

    def __init__(self) -> None:
        self._client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
        self._assistant_id_cache: Dict[Config, str] = {}
        self.clear_thread()

    def chat_with_text(
        self,
        message: str,
        history: List[List],
        job_role: str,
        company: str,
        job_description: str,
        behavioral_count: int,
        technical_count: int,
        situational_count: int,
        case_count: int
    ) -> Generator:
        config = Config(job_role, company, job_description, behavioral_count,
                        technical_count, situational_count, case_count)
        yield self._chat(message, config)

    def chat_with_video(
        self,
        video: str,
        job_role: str,
        company: str,
        job_description: str,
        behavioral_count: int,
        technical_count: int,
        situational_count: int,
        case_count: int
    ) -> List:
        # Transcribe the recorded answer with Whisper, then delete the local recording
        with open(video, 'rb') as file:
            transcriptions = self._client.audio.transcriptions.create(
                model='whisper-1',
                file=file,
            )
        os.remove(video)
        config = Config(job_role, company, job_description, behavioral_count,
                        technical_count, situational_count, case_count)
        response = self._chat(transcriptions.text, config)
        return [(transcriptions.text, response)]

    def clear_thread(self) -> None:
        print('Initializing new thread')
        self._thread = self._client.beta.threads.create()

    def _chat(self, message: str, config: Config) -> str:
        print('Started chat')
        assistant_id = self._init_assistant(config)
        return self._send_message(message, assistant_id)

    def _send_message(self, message: str, assistant_id: str) -> str:
        self._client.beta.threads.messages.create(
            thread_id=self._thread.id, role='user', content=message)
        print('Message created')
        run = self._client.beta.threads.runs.create(
            thread_id=self._thread.id, assistant_id=assistant_id)
        print('Run created')

        # Poll until the run completes
        while True:
            run_status = self._client.beta.threads.runs.retrieve(
                thread_id=self._thread.id, run_id=run.id)
            print(f'Run status: {run_status.status}')
            if run_status.status == 'completed':
                break
            sleep(1)  # Wait a second before checking again

        # Retrieve and return the latest message from the assistant
        messages = self._client.beta.threads.messages.list(thread_id=self._thread.id)
        response = messages.data[0].content[0].text.value
        print(f'Assistant response: {response}')
        return response

    def _create_files(self, company: str) -> List[str]:
        # Download a company-specific reference page and upload it for retrieval
        if company.lower() == 'amazon':
            url = 'https://www.aboutamazon.com/about-us/leadership-principles'
            filename = 'leadership_principles.html'
        else:
            return []
        filename, _ = urllib.request.urlretrieve(url, filename)
        with open(filename, 'rb') as file:
            assistant_file = self._client.files.create(file=file, purpose='assistants')
        file_ids = [assistant_file.id]
        os.remove(filename)
        return file_ids

    def _init_assistant(self, config: Config) -> str:
        # Reuse an existing assistant for identical settings; otherwise create a new one
        cache_key = config
        if cache_key in self._assistant_id_cache:
            print(f'Fetched from cache for key {cache_key}')
            return self._assistant_id_cache.get(cache_key)
        else:
            print(f'Initializing new assistant for key {cache_key}')
            file_ids = self._create_files(config.company)
            assistant = self._client.beta.assistants.create(
                name='Mock Interviewer',
                instructions=self._generate_assistant_instructions(config),
                model='gpt-4-0125-preview',
                tools=[
                    {
                        'type': 'retrieval'  # This adds the knowledge base (the uploaded file) as a tool
                    }
                ],
                file_ids=file_ids)
            self._assistant_id_cache[cache_key] = assistant.id
            return assistant.id

    def _generate_assistant_instructions(self, config: Config) -> str:
        if config.job_role and config.company:
            purpose = f'You are Ami, an AI mock interviewer for {config.job_role} roles at {config.company}.'
        elif config.job_role:
            purpose = f'You are Ami, an AI mock interviewer for {config.job_role} roles.'
        elif config.company:
            purpose = f'You are Ami, an AI mock interviewer for roles at {config.company}.'
        else:
            purpose = 'You are Ami, an AI mock interviewer.'

        if config.job_description:
            specifics = f'Tailor your questions based on the following job posting: {config.job_description}.'
        else:
            specifics = ''

        return (
            f"{purpose} Please state your purpose when the candidate sends you the first message. "
            f"If you have been provided a file, use it as an interview guide. {specifics} "
            f"Ask {config.behavioral_count} behavioral questions, {config.technical_count} technical questions, "
            f"{config.situational_count} situational questions, and {config.case_count} case-style questions. "
            "After the candidate gives a response, evaluate it by addressing the candidate directly, as if you were "
            "giving them feedback (i.e. address them as 'you'). Keep in mind what your company values in candidates. "
            "Provide a detailed analysis of the candidate's response based on the question type. Also, rate the "
            "response on a scale from 1 to 10, where 1 is inadequate and 10 is exceptional."
        )


mock_interviewer = MockInterviewer()

theme = gr.themes.Soft(
    primary_hue="blue",
    secondary_hue="indigo",
).set(
    body_background_fill='*neutral_100',
    body_background_fill_dark='*background_fill_secondary'
)

with gr.Blocks(theme=theme) as demo:
    with gr.Row():
        # Left column: interview settings
        with gr.Column(variant='panel', scale=1):
            config_title = gr.Markdown('### Interview Settings')
            with gr.Accordion("Job Information", open=False):
                job_role = gr.Textbox(label='Job Role', placeholder='Product Manager')
                company = gr.Textbox(label='Company', placeholder='Amazon')
                job_description = gr.TextArea(
                    label='Job Description',
                    info='Please copy and paste any relevant job description and information here:',
                    placeholder='Key job responsibilities, basic qualifications, preferred qualifications, about the company, etc.'
                )
            with gr.Accordion("Question Preferences", open=False):
                gr.Markdown('**Question Type and Count**: Please indicate how many questions you would like asked for each of the following question types:')
                behavioral_count = gr.Slider(label="Behavioral", maximum=10, value=1, step=1)
                technical_count = gr.Slider(label="Technical", maximum=10, value=1, step=1)
                situational_count = gr.Slider(label="Situational", maximum=10, value=1, step=1)
                case_count = gr.Slider(label="Case", maximum=10, value=1, step=1)

        # Right column: chat interface plus webcam recording
        with gr.Column(variant='panel', scale=6):
            chat_interface = gr.ChatInterface(
                fn=mock_interviewer.chat_with_text,
                title="Hi! I'm Ami, your AI Mock Interviewer.",
                description='You can begin by clicking record and introducing yourself!',
                additional_inputs=[job_role, company, job_description, behavioral_count,
                                   technical_count, situational_count, case_count],
                retry_btn=None,
                undo_btn=None)
            chat_interface.load(mock_interviewer.clear_thread)
            chat_interface.clear_btn.click(mock_interviewer.clear_thread)

            video = gr.Video(sources=['webcam'], include_audio=True)
            # When a webcam recording stops, transcribe and answer it, then clear the video component
            video.stop_recording(
                fn=mock_interviewer.chat_with_video,
                inputs=[video, job_role, company, job_description, behavioral_count,
                        technical_count, situational_count, case_count],
                outputs=[chat_interface.chatbot],
                api_name=False
            ).then(lambda: None, None, video, queue=False)

if __name__ == '__main__':
    demo.launch()
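
# --- Example local setup (an assumption, not part of the original app) ---
# Dependencies: gradio 4.x (for retry_btn/undo_btn on ChatInterface), python-dotenv,
# and an openai 1.x client release that still supports the Assistants v1
# 'retrieval' tool with assistant-level file_ids.
# Create a .env file next to this script containing:
#     OPENAI_API_KEY=sk-...
# Then launch with:
#     python mock_interviewer.py   # substitute this file's actual name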