'''Agent responsible for writing the resume based on user provided context''' | |
import ast | |
import json | |
import logging | |
import os | |
from openai import OpenAI | |
from configuration import ( | |
INFERENCE_URL, | |
WRITER_INSTRUCTIONS, | |
WRITER_MODEL, | |
REPO_SELECTION_PROMPT, | |
PROJECTS_SECTION_PROMPT | |
) | |
# pylint: disable=broad-exception-caught | |
def write_resume(linkedin_resume: dict, github_repositories: list, job_call: dict) -> str:
    """
    Generate a tailored resume from the supplied context and save it to disk.

    Selects the GitHub repositories most relevant to the job call, writes a
    projects section from them, then asks the model to compose the full
    resume from the LinkedIn data, the projects section, and the job call.

    Args:
        linkedin_resume (dict): Resume content extracted from the LinkedIn profile.
        github_repositories (list): Information about the applicant's GitHub repositories.
        job_call (dict): Extracted/summarized job call information.

    Returns:
        str | None: The generated resume text, or None if the API call failed.
    """
    logger = logging.getLogger(f'{__name__}.write_resume')
    logger.info("Selecting relevant GitHub repositories based on job call")
    project_repos = _choose_repositories(github_repositories, job_call)
    logger.info("Writing projects section of the resume")
    projects = _write_projects_section(project_repos, job_call)
    client = OpenAI(
        base_url=INFERENCE_URL,
        api_key=os.environ.get("API_KEY", "dummy-key-for-testing")
    )
    prompt = f'JOB CALL\n{job_call}\nLINKEDIN RESUME\n{linkedin_resume}\nPROJECTS\n{projects}'
    messages = [
        {
            'role': 'system',
            'content': WRITER_INSTRUCTIONS
        },
        {
            'role': 'user',
            'content': prompt
        }
    ]
    completion_args = {
        'model': WRITER_MODEL,
        'messages': messages,
    }
    try:
        response = client.chat.completions.create(**completion_args)
    except Exception as e:
        # Fix: message previously said "job summarization" (copy-paste error).
        logger.error('Error during resume writing API call: %s', e)
        # Fix: bail out early instead of falling through and attempting to
        # write None to the resume file below.
        return None
    resume_text = response.choices[0].message.content
    # Persist the resume so downstream steps (and the user) can pick it up.
    data_dir = 'data'
    # exist_ok avoids the exists()/makedirs race of the previous version.
    os.makedirs(data_dir, exist_ok=True)
    resume_file_path = os.path.join(data_dir, 'resume.md')
    try:
        with open(resume_file_path, 'w', encoding='utf-8') as f:
            f.write(resume_text)
        logger.info("Resume saved to: %s", resume_file_path)
    except OSError as e:
        # Best-effort save: a failed write should not discard the resume text.
        logger.error("Failed to save resume to file: %s", e)
    return resume_text
def _choose_repositories(github_repositories: list, job_call: dict) -> list:
    """
    Choose relevant GitHub repositories based on the job call requirements.

    Args:
        github_repositories (list): Information about the applicant's GitHub repositories.
        job_call (dict): Extracted/summarized job call information.

    Returns:
        list: Repositories whose names the model selected; empty list when the
            API call fails or the model reply cannot be parsed.
    """
    logger = logging.getLogger(f'{__name__}._choose_repositories')
    # Strip the full README text so we only spend input tokens on repo
    # metadata (name, description, topics, etc.).
    repo_data = [
        {k: v for k, v in d.items() if k != 'readme'}
        for d in github_repositories
    ]
    # Let the model select the most relevant repositories based on the job call
    client = OpenAI(
        base_url=INFERENCE_URL,
        api_key=os.environ.get("API_KEY", "dummy-key-for-testing")
    )
    messages = [
        {
            'role': 'system',
            'content': f'{REPO_SELECTION_PROMPT}'
        },
        {
            'role': 'user',
            'content': f'JOB CALL\n{json.dumps(job_call)}\n\nREPOSITORIES\n{json.dumps(repo_data)}'
        }
    ]
    completion_args = {
        'model': WRITER_MODEL,
        'messages': messages,
    }
    try:
        response = client.chat.completions.create(**completion_args)
    except Exception as e:
        # Fix: previously fell through with response=None and crashed on the
        # membership test below (TypeError: argument of type 'NoneType'...);
        # the log message also wrongly said "job summarization".
        logger.error('Error during repository selection API call: %s', e)
        return []
    reply = response.choices[0].message.content
    try:
        # The prompt asks the model for a Python-literal list of repo names;
        # guard the parse — model output is not guaranteed to be well-formed.
        selected_names = ast.literal_eval(reply)
    except (ValueError, SyntaxError) as e:
        logger.error('Could not parse repository selection reply: %s', e)
        return []
    # Use the model's selection to filter the original (README-bearing) repos.
    return [repo for repo in github_repositories if repo['name'] in selected_names]
def _write_projects_section(project_repos: list, job_call: dict) -> str:
    """
    Write the projects section of the resume based on selected GitHub repositories.

    Args:
        project_repos (list): List of relevant GitHub repositories.
        job_call (dict): Extracted/summarized job call information.

    Returns:
        str | None: Formatted projects section for the resume, or None if the
            API call failed.
    """
    logger = logging.getLogger(f'{__name__}._write_projects_section')
    client = OpenAI(
        base_url=INFERENCE_URL,
        api_key=os.environ.get("API_KEY", "dummy-key-for-testing")
    )
    messages = [
        {
            'role': 'system',
            'content': f'{PROJECTS_SECTION_PROMPT}'
        },
        {
            'role': 'user',
            'content': (f'JOB CALL\n{json.dumps(job_call)}\n\n' +
                        f'REPOSITORIES\n{json.dumps(project_repos)}')
        }
    ]
    completion_args = {
        'model': WRITER_MODEL,
        'messages': messages,
    }
    try:
        response = client.chat.completions.create(**completion_args)
    except Exception as e:
        # Fix: message previously said "job summarization" (copy-paste error).
        logger.error('Error during projects section API call: %s', e)
        return None
    return response.choices[0].message.content