Commit f3f0a69 · 1 Parent(s): 2600a8c
Johnny committed: added modular functions, config, model
Files changed:
- __pycache__/database.cpython-311.pyc +0 -0
- __pycache__/model.cpython-311.pyc +0 -0
- ai_model.py +13 -0
- app.py +28 -16
- config.py +12 -0
- crewai.py +0 -30
- database.py +18 -11
- pdf_generator.py +16 -0
- resume_parser.py +9 -0
__pycache__/database.cpython-311.pyc
ADDED (binary file, 1.38 kB)

__pycache__/model.cpython-311.pyc
ADDED (binary file, 720 Bytes)
ai_model.py
ADDED
@@ -0,0 +1,13 @@
+import requests
+import json
+from config import HUGGINGFACE_API_URL, HUGGINGFACE_API_KEY
+
+HEADERS = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
+
+def analyze_resume(text):
+    """Send resume text to Google Gemini model via Hugging Face API."""
+    data = {"inputs": text}
+    response = requests.post(HUGGINGFACE_API_URL, headers=HEADERS, data=json.dumps(data))
+    if response.status_code == 200:
+        return response.json()["output"]
+    return {"score": 0, "summary": "Error processing resume"}
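A minimal way to exercise analyze_resume locally, assuming HUGGINGFACE_API_URL and HUGGINGFACE_API_KEY are set in the .env that config.py loads, and that the endpoint's JSON carries score and summary keys as app.py expects (the sample text is made up):

# Hypothetical local check, not part of the commit.
from ai_model import analyze_resume

result = analyze_resume("Jane Doe. 5 years of Python and SQL. BSc Computer Science.")
print(result.get("score", 0), result.get("summary", "No summary available"))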
app.py
CHANGED
@@ -1,24 +1,36 @@
 import streamlit as st
-import
-from
-from
+from database import get_db, Candidate
+from resume_parser import extract_text_from_pdf
+from ai_model import analyze_resume
+from pdf_generator import generate_summary_pdf
+from sqlalchemy.orm import Session

 st.title("AI-Powered Resume Screening")

-uploaded_files = st.file_uploader("Upload Resumes",
+uploaded_files = st.file_uploader("Upload Resumes", type=["pdf"], accept_multiple_files=True)

-results = []
-for
-resume_text =
-save_resume_data(resume_text, parsed_details, ranking_score)
+if uploaded_files:
+    db: Session = next(get_db())
+    candidates = []
+
+    for file in uploaded_files:
+        resume_text = extract_text_from_pdf(file)
+        analysis_result = analyze_resume(resume_text)
+        score = int(analysis_result.get("score", 0))
+        summary = analysis_result.get("summary", "No summary available")
+
+        new_candidate = Candidate(name=file.name, score=score, summary=summary, resume_text=resume_text)
+        db.add(new_candidate)
+        candidates.append(new_candidate)
+
+    db.commit()
+
+    # Generate shortlist
+    shortlisted = sorted(candidates, key=lambda x: x.score, reverse=True)[:5]
+    pdf_path = generate_summary_pdf(shortlisted)
+
+    st.success("Top candidates shortlisted!")
+    st.download_button("Download Shortlist PDF", open(pdf_path, "rb"), file_name="shortlisted_candidates.pdf")
+
+    # the link expiration time is set to 24 hours
+    # file format output to pdf.
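Assuming the imported modules above sit in the same directory and the .env described in config.py is present, this runs like any other Streamlit script, e.g. with: streamlit run app.py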
config.py
ADDED
@@ -0,0 +1,12 @@
+import os
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Retrieve environment variables
+DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://@localhost:5433/candidate")
+HUGGINGFACE_API_URL = os.getenv("HUGGINGFACE_API_URL")
+HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
+if not DATABASE_URL or not HUGGINGFACE_API_URL or not HUGGINGFACE_API_KEY:
+    raise ValueError("One or more environment variables are missing. Please check your .env file.")
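For reference, config.py expects a .env along these lines; the values below are placeholders (patterned on the deleted crewai.py constants), not from the commit:

DATABASE_URL=postgresql://user:password@localhost:5433/candidate
HUGGINGFACE_API_URL=https://api-inference.huggingface.co/models/YOUR_MODEL
HUGGINGFACE_API_KEY=your_api_key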
crewai.py
DELETED
@@ -1,30 +0,0 @@
-from crewai import Crew, Agent, Task
-from huggingface_hub import InferenceClient
-import os
-
-HF_API_URL = "https://api-inference.huggingface.co/models/YOUR_MODEL"
-HF_API_KEY = "your_api_key"
-
-client = InferenceClient(token=HF_API_KEY)
-
-class ResumeAgents:
-    @staticmethod
-    def parse_resume(resume_text):
-        """Agent to extract key resume details"""
-        prompt = f"Extract skills, experience, and education from this resume:\n{resume_text}"
-        response = client.text_generation(prompt)
-        return response
-
-    @staticmethod
-    def rank_resume(resume_details, job_description):
-        """Agent to rank the resume against the job description"""
-        prompt = f"Rank this resume based on the job description:\nJob: {job_description}\nResume: {resume_details}"
-        response = client.text_generation(prompt)
-        return response
-
-    @staticmethod
-    def recommend_candidates(resume_rankings):
-        """Agent to recommend the best candidates"""
-        prompt = f"Recommend the top candidates based on rankings:\n{resume_rankings}"
-        response = client.text_generation(prompt)
-        return response
database.py
CHANGED
@@ -1,14 +1,21 @@
-import
-from sqlalchemy import
-
-DATABASE_URL = "postgresql://user:password@localhost:5432/resumes_db"
+from sqlalchemy import create_engine, Column, Integer, String, Text
+from sqlalchemy.orm import sessionmaker, declarative_base
+from config import DATABASE_URL  # Import from config.py
+
+# Set up SQLAlchemy
+Base = declarative_base()
 engine = create_engine(DATABASE_URL)
+SessionLocal = sessionmaker(bind=engine)
-
-with engine.connect() as conn:
-    conn.execute(
-        "INSERT INTO resumes (resume_text, parsed_details, ranking_score) VALUES (%s, %s, %s)",
-        (resume_text, parsed_details, ranking_score)
-    )
+
+# Define Candidate Model
+class Candidate(Base):
+    __tablename__ = "candidates"
+    id = Column(Integer, primary_key=True, index=True)
+    name = Column(String, index=True)
+    score = Column(Integer)
+    summary = Column(Text)
+    resume_text = Column(Text)
+    pdf_link = Column(String)
+
+# Create tables
+Base.metadata.create_all(bind=engine)
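Note that app.py imports get_db from database, but this diff does not define it. A minimal sketch of such a helper, assuming the SessionLocal factory above is the intended source of sessions:

# Hypothetical helper, not in the diff: yield a session and close it afterwards.
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()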
pdf_generator.py
ADDED
@@ -0,0 +1,16 @@
+from reportlab.pdfgen import canvas
+import tempfile
+
+def generate_summary_pdf(candidates):
+    """Generate a PDF report for shortlisted candidates."""
+    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
+    c = canvas.Canvas(temp_file.name)
+
+    c.drawString(100, 800, "Shortlisted Candidates")
+    y = 780
+    for candidate in candidates:
+        y -= 20
+        c.drawString(100, y, f"{candidate.name} - Score: {candidate.score}")
+
+    c.save()
+    return temp_file.name  # Return path to the PDF
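A quick standalone check of generate_summary_pdf, using throwaway objects that only carry the name and score attributes the function reads (not part of the commit):

# Hypothetical smoke test for pdf_generator.py.
from types import SimpleNamespace
from pdf_generator import generate_summary_pdf

fake_candidates = [SimpleNamespace(name="resume_a.pdf", score=87),
                   SimpleNamespace(name="resume_b.pdf", score=72)]
print(generate_summary_pdf(fake_candidates))  # prints the path of the generated PDF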
resume_parser.py
ADDED
@@ -0,0 +1,9 @@
+import fitz  # PyMuPDF
+
+def extract_text_from_pdf(pdf_file):
+    """Extract text from a given PDF file."""
+    text = ""
+    with fitz.open(pdf_file) as doc:
+        for page in doc:
+            text += page.get_text()
+    return text
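One caveat: app.py passes Streamlit's in-memory UploadedFile objects, while fitz.open(pdf_file) expects a filesystem path. A hedged variant (not part of the commit) that reads the upload through PyMuPDF's stream argument instead:

import fitz  # PyMuPDF

def extract_text_from_pdf(pdf_file):
    """Extract text from a PDF supplied as a file-like object (e.g. a Streamlit upload)."""
    text = ""
    # Read the raw bytes and let PyMuPDF parse them in memory.
    with fitz.open(stream=pdf_file.read(), filetype="pdf") as doc:
        for page in doc:
            text += page.get_text()
    return text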