# NOTE: removed non-source viewer residue (status text, git blame hashes, gutter line numbers)
import streamlit as st
import os
import asyncio
import warnings
from crewai import Agent, Task, Crew
from langchain_google_genai import ChatGoogleGenerativeAI
from crewai_tools import FileReadTool, ScrapeWebsiteTool, SerperDevTool
import json
# Silence noisy library deprecation warnings in the Streamlit log.
warnings.filterwarnings('ignore')

# SECURITY: never commit API keys to source control. Read the Gemini key from
# the environment instead (export GOOGLE_API_KEY before launching the app).
google_api_key = os.environ.get("GOOGLE_API_KEY", "")
if not google_api_key:
    # Surface the misconfiguration in the UI rather than failing opaquely later.
    st.warning("GOOGLE_API_KEY is not set; Gemini calls will fail.")
# ---------------------------------------------------------------------------
# Gemini LLM shared by every agent below.
# ---------------------------------------------------------------------------
async def initialize_llm():
    """Build the Gemini chat model used by all agents.

    Kept ``async`` for backward compatibility with existing callers, although
    the constructor itself performs no awaitable work.

    Returns:
        ChatGoogleGenerativeAI: configured ``gemini-1.5-flash`` client.
    """
    return ChatGoogleGenerativeAI(
        model="gemini-1.5-flash",
        verbose=True,
        temperature=0.5,
        google_api_key=google_api_key,
    )

# Cache the model on the ``st`` module so Streamlit script reruns reuse one
# instance. ``asyncio.run`` both creates AND closes a fresh event loop; the
# previous ``new_event_loop()`` approach never closed its loop, leaking it on
# every cold start.
if not hasattr(st, 'llm'):
    st.llm = asyncio.run(initialize_llm())
# ---------------------------------------------------------------------------
# Tools shared by the agents.
# ---------------------------------------------------------------------------
# SECURITY: the Serper key must come from the environment, never from source.
serper_api_key = os.environ.get("SERPER_API_KEY", "")
search_tool = SerperDevTool(api_key=serper_api_key)   # Google-search results via Serper
scrape_tool = ScrapeWebsiteTool()                     # fetches page content from a URL
read_resume = FileReadTool(file_path='fake_resume.md')  # reads the candidate's resume
# ---------------------------------------------------------------------------
# Agents
# ---------------------------------------------------------------------------
# Scans a job posting and distills the requirements applicants must meet.
researcher = Agent(
    role="Tech Job Researcher",
    goal="Make sure to do amazing analysis on job posting to help job applicants",
    backstory=(
        "As a Job Researcher, your prowess in navigating and extracting critical "
        "information from job postings is unmatched. Your skills help pinpoint the necessary "
        "qualifications and skills sought by employers, forming the foundation for effective application tailoring."
    ),
    tools=[scrape_tool, search_tool],
    llm=st.llm,
    verbose=True,
)
# Builds a rich candidate profile from GitHub, LinkedIn, and the resume.
profiler = Agent(
    role="Personal Profiler for Engineers",
    goal="Do incredible research on job applicants to help them stand out in the job market",
    backstory=(
        "Equipped with analytical prowess, you dissect and synthesize information "
        "from diverse sources to craft comprehensive personal and professional profiles, laying the "
        "groundwork for personalized resume enhancements."
    ),
    tools=[scrape_tool, search_tool, read_resume],
    llm=st.llm,
    verbose=True,
)
# Tailors the resume so it lines up with the extracted job requirements.
resume_strategist = Agent(
    role="Resume Strategist for Engineers",
    goal="Find all the best ways to make a resume stand out in the job market.",
    backstory=(
        "With a strategic mind and an eye for detail, you excel at refining resumes to highlight the most "
        "relevant skills and experiences, ensuring they resonate perfectly with the job's requirements."
    ),
    tools=[scrape_tool, search_tool, read_resume],
    llm=st.llm,
    verbose=True,
)
# Prepares interview questions and talking points from the tailored resume.
interview_preparer = Agent(
    role="Engineering Interview Preparer",
    goal="Create interview questions and talking points based on the resume and job requirements",
    backstory=(
        "Your role is crucial in anticipating the dynamics of interviews. With your ability to formulate key questions "
        "and talking points, you prepare candidates for success, ensuring they can confidently address all aspects of the "
        "job they are applying for."
    ),
    tools=[scrape_tool, search_tool, read_resume],
    llm=st.llm,
    verbose=True,
)
# ---------------------------------------------------------------------------
# Tasks ({placeholders} are filled from job_application_inputs at kickoff)
# ---------------------------------------------------------------------------
# Task 1: break the job posting down into a concrete requirements list.
research_task = Task(
    agent=researcher,
    async_execution=True,  # runs concurrently with the profiling task
    description=(
        "Analyze the job posting URL provided ({job_posting_url}) "
        "to extract key skills, experiences, and qualifications required. Use the tools to gather content and identify "
        "and categorize the requirements."
    ),
    expected_output=(
        "A structured list of job requirements, including necessary skills, qualifications, and experiences."
    ),
)
# Task 2: synthesize a candidate profile from the supplied links and write-up.
profile_task = Task(
    agent=profiler,
    async_execution=True,  # runs concurrently with the research task
    description=(
        "Compile a detailed personal and professional profile using the GitHub ({github_url}) URLs, LinkedIn ({linkedin_url}) URL, and personal write-up "
        "({personal_writeup}). Utilize tools to extract and synthesize information from these sources."
    ),
    expected_output=(
        "A comprehensive profile document that includes skills, project experiences, contributions, interests, and "
        "communication style."
    ),
)
# Task 3: rewrite the resume using the outputs of tasks 1 and 2.
resume_strategy_task = Task(
    agent=resume_strategist,
    context=[research_task, profile_task],  # waits for both async tasks
    output_file="tailored_resume.md",
    description=(
        "Using the profile and job requirements obtained from previous tasks, tailor the resume to highlight the most "
        "relevant areas. Employ tools to adjust and enhance the resume content. Make sure this is the best resume even but "
        "don't make up any information. Update every section, including the initial summary, work experience, skills, "
        "and education. All to better reflect the candidate's abilities and how it matches the job posting."
    ),
    expected_output=(
        "An updated resume that effectively highlights the candidate's qualifications and experiences relevant to the job."
    ),
)
# Task 4: derive interview prep material from everything produced so far.
interview_preparation_task = Task(
    agent=interview_preparer,
    context=[research_task, profile_task, resume_strategy_task],
    output_file="interview_materials.md",
    description=(
        "Create a set of potential interview questions and talking points based on the tailored resume and job requirements. "
        "Utilize tools to generate relevant questions and discussion points. Make sure to use these questions and talking points to "
        "help the candidate highlight the main points of the resume and how it matches the job posting."
    ),
    expected_output=(
        "A document containing key questions and talking points that the candidate should prepare for the initial interview."
    ),
)
# ---------------------------------------------------------------------------
# Crew: the two research tasks feed the resume and interview-prep tasks.
# ---------------------------------------------------------------------------
_crew_agents = [researcher, profiler, resume_strategist, interview_preparer]
_crew_tasks = [research_task, profile_task, resume_strategy_task, interview_preparation_task]
job_application_crew = Crew(agents=_crew_agents, tasks=_crew_tasks, verbose=True)
# ---------------------------------------------------------------------------
# Concrete values interpolated into the {placeholders} of the task templates.
# ---------------------------------------------------------------------------
job_application_inputs = dict(
    job_posting_url=(
        'https://jobs.lever.co/AIFund/6c82e23e-d954-4dd8-a734-c0c2c5ee00f1'
        '?lever-origin=applied&lever-source%5B%5D=AI+Fund'
    ),
    github_url='https://github.com/PrathameshK',
    linkedin_url='https://www.linkedin.com/in/prathameshkhade/',
    personal_writeup='fake_resume.md',
)
# ---------------------------------------------------------------------------
# Run the crew and render the results. kickoff() is a synchronous call; the
# broad except is a deliberate top-level UI boundary that reports via st.error.
# ---------------------------------------------------------------------------
try:
    results = job_application_crew.kickoff(inputs=job_application_inputs)

    # Show the raw result first so format surprises are easy to debug.
    st.write("Type of results:", type(results))
    st.write("Contents of results:", results)

    if not isinstance(results, dict):
        st.write("Unexpected results format:", results)
    else:
        # Render each expected section, falling back to a placeholder message.
        _sections = [
            ("Job Requirements:", 'job_requirements', 'No job requirements found'),
            ("Personal Profile:", 'personal_profile', 'No personal profile found'),
            ("Tailored Resume:", 'tailored_resume', 'No tailored resume found'),
            ("Interview Materials:", 'interview_materials', 'No interview materials found'),
        ]
        for _label, _key, _fallback in _sections:
            st.write(_label, results.get(_key, _fallback))
except Exception as e:
    st.error(f"An error occurred: {e}")