import os
import time
import tempfile
import threading
import warnings
from collections import defaultdict
from datetime import datetime, timedelta
from pathlib import Path

import gradio as gr
import markdown
import PyPDF2

warnings.filterwarnings('ignore')
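
# Third-party dependencies used below (the usual PyPI package names, assumed
# here rather than taken from a pinned requirements file):
#   pip install gradio PyPDF2 markdown crewai crewai-tools langchain-openai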


class RateLimiter:
    """Simple in-memory, per-user sliding-window rate limiter."""

    def __init__(self, max_requests=5, time_window=60):
        self.max_requests = max_requests
        self.time_window = time_window  # seconds
        self.requests = defaultdict(list)
        self.lock = threading.Lock()

    def is_allowed(self, user_id):
        with self.lock:
            now = datetime.now()

            # Drop timestamps that have fallen outside the sliding window.
            self.requests[user_id] = [
                req_time for req_time in self.requests[user_id]
                if now - req_time < timedelta(seconds=self.time_window)
            ]

            if len(self.requests[user_id]) >= self.max_requests:
                return False

            self.requests[user_id].append(now)
            return True


rate_limiter = RateLimiter(max_requests=3, time_window=300)
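# Module-level limiter shared by all requests. The user_id passed to
# is_allowed() is the per-session value wired up via gr.State in
# create_interface(), so the 3-requests-per-5-minutes budget is tracked
# per session rather than globally.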


def extract_text_from_pdf(pdf_file):
    """Extract text from an uploaded PDF file."""
    try:
        reader = PyPDF2.PdfReader(pdf_file)
        text = ""
        for page in reader.pages:
            # extract_text() can return None for pages with no extractable text.
            text += (page.extract_text() or "") + "\n"
        return text.strip()
    except Exception as e:
        return f"Error reading PDF: {str(e)}"


def setup_crewai():
    """Initialize CrewAI agents, tools, and the shared LLM."""
    try:
        from crewai import Agent, Task, Crew
        from crewai_tools import ScrapeWebsiteTool, SerperDevTool
        from langchain_openai import ChatOpenAI

        # Tools for reading the job posting; SerperDevTool expects a
        # SERPER_API_KEY in the environment.
        search_tool = SerperDevTool()
        scrape_tool = ScrapeWebsiteTool()

        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)

        researcher = Agent(
            role="Job Requirements Analyst",
            goal="Extract and analyze key job requirements efficiently",
            tools=[scrape_tool, search_tool],
            verbose=False,
            backstory="Expert at quickly identifying essential job requirements and qualifications from job postings.",
            llm=llm,
        )

        resume_strategist = Agent(
            role="Resume Enhancement Specialist",
            goal="Optimize resumes to match job requirements effectively",
            tools=[],
            verbose=False,
            backstory="Skilled at tailoring resumes to highlight relevant experience and skills for specific job applications.",
            llm=llm,
        )

        return researcher, resume_strategist, llm

    except ImportError as e:
        raise Exception("CrewAI not installed. Please install required packages.") from e


def create_tasks(researcher, resume_strategist, job_url, resume_text):
    """Create optimized tasks for the crew."""
    from crewai import Task

    research_task = Task(
        description=f"""
        Analyze the job posting at {job_url} and extract the top 10 most important:
        1. Required skills and technologies
        2. Key qualifications and experience levels
        3. Preferred background and certifications

        Focus on the most critical requirements only.
        """,
        expected_output="A concise list of the top 10 most important job requirements.",
        agent=researcher,
    )

    # The resume task receives the research task's output through `context`,
    # so the crew runs the two tasks in sequence.
    resume_task = Task(
        description=f"""
        Using the job requirements from the research task, optimize this resume:

        {resume_text}

        Instructions:
        1. Rewrite the professional summary to align with the job
        2. Highlight relevant experience and skills
        3. Adjust technical skills section to match requirements
        4. Ensure ATS-friendly formatting
        5. Keep the same factual information but present it strategically

        Return the complete optimized resume in markdown format.
        """,
        expected_output="A complete, optimized resume in markdown format tailored to the job requirements.",
        agent=resume_strategist,
        context=[research_task]
    )

    return research_task, resume_task


# gr.Progress is injected by Gradio when declared as a default parameter of an
# event handler (it is not a context manager); progress updates rely on
# Gradio's queue, which recent versions enable by default.
def process_application(pdf_file, job_url, user_session, progress=gr.Progress()):
    """Main processing function with rate limiting."""

    if not rate_limiter.is_allowed(user_session):
        return "⚠️ Rate limit exceeded. Please wait 5 minutes before submitting another request.", ""

    if not pdf_file or not job_url:
        return "❌ Please provide both a PDF resume and job URL.", ""

    try:
        progress(0.1, desc="Extracting text from PDF...")
        resume_text = extract_text_from_pdf(pdf_file)

        if "Error reading PDF" in resume_text:
            return f"❌ {resume_text}", ""

        progress(0.3, desc="Setting up AI agents...")
        researcher, resume_strategist, llm = setup_crewai()

        progress(0.5, desc="Creating optimization tasks...")
        research_task, resume_task = create_tasks(researcher, resume_strategist, job_url, resume_text)

        progress(0.7, desc="Analyzing job requirements...")

        from crewai import Crew
        crew = Crew(
            agents=[researcher, resume_strategist],
            tasks=[research_task, resume_task],
            verbose=False
        )

        progress(0.9, desc="Generating tailored resume...")
        result = crew.kickoff()

        progress(1.0, desc="Complete!")

        html_result = markdown.markdown(str(result))

        return "✅ Resume successfully tailored!", html_result

    except Exception as e:
        return f"❌ Error processing your request: {str(e)}", ""


def create_interface():
    """Create the Gradio interface."""

    with gr.Blocks(
        title="CV Tailor - AI Resume Optimizer",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px;
            margin: auto;
        }
        .header {
            text-align: center;
            margin-bottom: 30px;
        }
        .rate-limit-info {
            background-color: #f0f8ff;
            padding: 10px;
            border-radius: 5px;
            margin-bottom: 20px;
        }
        """
    ) as app:

        gr.HTML("""
        <div class="header">
            <h1>🎯 CV Tailor - AI Resume Optimizer</h1>
            <p>Upload your PDF resume and a job URL to get an AI-tailored resume that matches the job requirements!</p>
        </div>
        """)

        gr.HTML("""
        <div class="rate-limit-info">
            <strong>⚡ Rate Limit:</strong> 3 requests per 5 minutes to manage API costs.
            Please be patient and make each request count!
        </div>
        """)

        with gr.Row():
            with gr.Column(scale=1):
                pdf_input = gr.File(
                    label="📄 Upload Your Resume (PDF)",
                    file_types=[".pdf"],
                    file_count="single"
                )

                job_url_input = gr.Textbox(
                    label="🔗 Job Posting URL",
                    placeholder="https://company.com/jobs/position",
                    lines=1
                )

                submit_btn = gr.Button(
                    "🚀 Generate Tailored Resume",
                    variant="primary",
                    size="lg"
                )

                gr.Examples(
                    examples=[
                        ["https://jobs.lever.co/example-company/software-engineer"],
                        ["https://www.linkedin.com/jobs/view/example-job-id"],
                        ["https://careers.google.com/jobs/results/example-position"]
                    ],
                    inputs=job_url_input,
                    label="📋 Example Job URLs"
                )

            with gr.Column(scale=2):
                status_output = gr.Textbox(
                    label="📊 Status",
                    interactive=False,
                    lines=1
                )

                result_output = gr.HTML(
                    label="📝 Tailored Resume",
                    value="Your optimized resume will appear here..."
                )

        # The gr.State component supplies the per-session identifier used by the
        # rate limiter; its callable default is evaluated when a session loads,
        # so each visitor is tracked separately.
        submit_btn.click(
            fn=process_application,
            inputs=[pdf_input, job_url_input, gr.State(lambda: str(time.time()))],
            outputs=[status_output, result_output]
        )

        gr.HTML("""
        <div style="text-align: center; margin-top: 50px; color: #666;">
            <p>Powered by CrewAI & OpenAI GPT-4o Mini |
            <a href="https://github.com/joaomdmoura/crewAI" target="_blank">CrewAI</a> |
            Built with ❤️ using Gradio</p>
        </div>
        """)

    return app


if __name__ == "__main__":

    if not os.getenv("OPENAI_API_KEY"):
        print("⚠️ Warning: OPENAI_API_KEY not found in environment variables")
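    # SerperDevTool (created in setup_crewai) reads its key from the
    # SERPER_API_KEY environment variable (the variable name used by
    # crewai_tools / serper.dev; adjust if your setup differs), so flag a
    # missing key here as well to make failures easier to diagnose.
    if not os.getenv("SERPER_API_KEY"):
        print("⚠️ Warning: SERPER_API_KEY not found in environment variables")
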
    app = create_interface()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_api=False,
        share=False
    )