Johnny committed on
Commit
56325dc
·
1 Parent(s): 8dad2e8

consolidated functions under utils.py, added config

Browse files
__pycache__/config.cpython-311.pyc ADDED
Binary file (855 Bytes). View file
 
__pycache__/utils.cpython-311.pyc ADDED
Binary file (4.44 kB). View file
 
ai_model.py DELETED
@@ -1,13 +0,0 @@
1
- import requests
2
- import json
3
- from config import HUGGINGFACE_API_URL, HUGGINGFACE_API_KEY
4
-
5
- HEADERS = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
6
-
7
- def analyze_resume(text):
8
- """Send resume text to Google Gemini model via Hugging Face API."""
9
- data = {"inputs": text}
10
- response = requests.post(HUGGINGFACE_API_URL, headers=HEADERS, data=json.dumps(data))
11
- if response.status_code == 200:
12
- return response.json()["output"]
13
- return {"score": 0, "summary": "Error processing resume"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py DELETED
@@ -1,35 +0,0 @@
1
- import streamlit as st
2
- from database import get_db, Candidate
3
- from resume_parser import extract_text_from_pdf
4
- from ai_model import analyze_resume
5
- from pdf_generator import generate_summary_pdf
6
- from sqlalchemy.orm import Session
7
-
8
- st.title("AI-Powered Resume Screening")
9
-
10
- uploaded_files = st.file_uploader("Upload Resumes", type=["pdf"], accept_multiple_files=True)
11
-
12
- if uploaded_files:
13
- db: Session = next(get_db())
14
- candidates = []
15
-
16
- for file in uploaded_files:
17
- resume_text = extract_text_from_pdf(file)
18
- analysis_result = analyze_resume(resume_text)
19
- score = int(analysis_result.get("score", 0))
20
- summary = analysis_result.get("summary", "No summary available")
21
-
22
- new_candidate = Candidate(name=file.name, score=score, summary=summary, resume_text=resume_text)
23
- db.add(new_candidate)
24
- candidates.append(new_candidate)
25
-
26
- db.commit()
27
-
28
- # Generate shortlist
29
- shortlisted = sorted(candidates, key=lambda x: x.score, reverse=True)[:5]
30
- pdf_path = generate_summary_pdf(shortlisted)
31
-
32
- st.success("Top candidates shortlisted!")
33
- st.download_button("Download Shortlist PDF", open(pdf_path, "rb"), file_name="shortlisted_candidates.pdf")
34
-
35
- # the link expiration time is set to 24 hours
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
config.py CHANGED
@@ -1,12 +1,18 @@
1
  import os
2
  from dotenv import load_dotenv
 
3
 
4
  # Load environment variables from .env file
5
  load_dotenv()
6
 
7
- # Retrieve environment variables
8
- DATABASE_URL = os.getenv("postgresql://@localhost:5433/candidate")
9
- HUGGINGFACE_API_URL = os.getenv("HUGGINGFACE_API_URL")
10
- HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
11
- if not DATABASE_URL or not HUGGINGFACE_API_URL or not HUGGINGFACE_API_KEY:
12
- raise ValueError("One or more environment variables are missing. Please check your .env file.")
 
 
 
 
 
 
1
  import os
2
  from dotenv import load_dotenv
3
+ from supabase import create_client
4
 
5
  # Load environment variables from .env file
6
  load_dotenv()
7
 
8
+ # Supabase API Config
9
+ SUPABASE_URL = "https://lmpazoxzucnlqqxjoihi.supabase.co"
10
+ SUPABASE_KEY = os.getenv("SUPABASE_API_KEY")
11
+ if not SUPABASE_KEY:
12
+ raise ValueError("SUPABASE_KEY is not set in the environment variables.")
13
+ supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
14
+
15
+ # Hugging Face API Config
16
+ HF_API_URL = "https://api-inference.huggingface.co/models/google/gemma-7b"
17
+ HF_API_TOKEN = os.getenv("HF_API_TOKEN")
18
+ HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
database.py DELETED
@@ -1,24 +0,0 @@
1
- from sqlalchemy import create_engine, Column, Integer, String, Text
2
- from sqlalchemy.orm import sessionmaker, declarative_base
3
- from config import DATABASE_URL # Import from config.py
4
-
5
- # Set up SQLAlchemy
6
- Base = declarative_base()
7
- engine = create_engine(DATABASE_URL)
8
- SessionLocal = sessionmaker(bind=engine)
9
-
10
- # Define Candidate Model
11
- class Candidate(Base):
12
- __tablename__ = "candidates"
13
- id = Column(Integer, primary_key=True, index=True)
14
- name = Column(String(255), index=True)
15
- score = Column(Integer)
16
- summary = Column(Text)
17
- email = Column(String(255), unique=True, index=True) # Add email to the schema
18
- resume_text = Column(Text)
19
- pdf_link = Column(String)
20
-
21
- # Create tables
22
- Base.metadata.create_all(bind=engine)
23
-
24
- # add email to the schema
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from utils import process_resumes, generate_pdf_report
3
+
4
+ def main():
5
+ st.title("AI Candidate Screening App")
6
+ job_description = st.text_area("Enter Job Description")
7
+ uploaded_files = st.file_uploader("Upload Resumes (PDF)", accept_multiple_files=True, type=["pdf"])
8
+
9
+ if st.button("Process Resumes"):
10
+ shortlisted = process_resumes(uploaded_files, job_description)
11
+ for candidate in shortlisted:
12
+ st.write(f"**{candidate['name']}** - Score: {candidate['score']}")
13
+
14
+ # Generate PDF Report
15
+ pdf_report = generate_pdf_report(shortlisted)
16
+ st.download_button("Download Shortlist Report", pdf_report, "shortlist.pdf")
17
+
18
+ if __name__ == "__main__":
19
+ main()
multi_crew/.gitignore DELETED
@@ -1,3 +0,0 @@
1
- .env
2
- __pycache__/
3
- .DS_Store
 
 
 
 
multi_crew/README.md DELETED
@@ -1,54 +0,0 @@
1
- # LatestAiDevelopment Crew
2
-
3
- Welcome to the LatestAiDevelopment Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
4
-
5
- ## Installation
6
-
7
- Ensure you have Python >=3.10 <3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
8
-
9
- First, if you haven't already, install uv:
10
-
11
- ```bash
12
- pip install uv
13
- ```
14
-
15
- Next, navigate to your project directory and install the dependencies:
16
-
17
- (Optional) Lock the dependencies and install them by using the CLI command:
18
- ```bash
19
- crewai install
20
- ```
21
- ### Customizing
22
-
23
- **Add your `OPENAI_API_KEY` into the `.env` file**
24
-
25
- - Modify `src/latest_ai_development/config/agents.yaml` to define your agents
26
- - Modify `src/latest_ai_development/config/tasks.yaml` to define your tasks
27
- - Modify `src/latest_ai_development/crew.py` to add your own logic, tools and specific args
28
- - Modify `src/latest_ai_development/main.py` to add custom inputs for your agents and tasks
29
-
30
- ## Running the Project
31
-
32
- To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
33
-
34
- ```bash
35
- $ crewai run
36
- ```
37
-
38
- This command initializes the latest-ai-development Crew, assembling the agents and assigning them tasks as defined in your configuration.
39
-
40
- This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.
41
-
42
- ## Understanding Your Crew
43
-
44
- The latest-ai-development Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
45
-
46
- ## Support
47
-
48
- For support, questions, or feedback regarding the LatestAiDevelopment Crew or crewAI.
49
- - Visit our [documentation](https://docs.crewai.com)
50
- - Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
51
- - [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
52
- - [Chat with our docs](https://chatg.pt/DWjSBZn)
53
-
54
- Let's create wonders together with the power and simplicity of crewAI.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
multi_crew/knowledge/user_preference.txt DELETED
@@ -1,4 +0,0 @@
1
- User name is John Doe.
2
- User is an AI Engineer.
3
- User is interested in AI Agents.
4
- User is based in San Francisco, California.
 
 
 
 
 
multi_crew/pyproject.toml DELETED
@@ -1,23 +0,0 @@
1
- [project]
2
- name = "latest_ai_development"
3
- version = "0.1.0"
4
- description = "latest-ai-development using crewAI"
5
- authors = [{ name = "Your Name", email = "you@example.com" }]
6
- requires-python = ">=3.10,<3.13"
7
- dependencies = [
8
- "crewai[tools]>=0.105.0,<1.0.0"
9
- ]
10
-
11
- [project.scripts]
12
- latest_ai_development = "latest_ai_development.main:run"
13
- run_crew = "latest_ai_development.main:run"
14
- train = "latest_ai_development.main:train"
15
- replay = "latest_ai_development.main:replay"
16
- test = "latest_ai_development.main:test"
17
-
18
- [build-system]
19
- requires = ["hatchling"]
20
- build-backend = "hatchling.build"
21
-
22
- [tool.crewai]
23
- type = "crew"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
multi_crew/src/latest_ai_development/__init__.py DELETED
File without changes
multi_crew/src/latest_ai_development/config/agents.yaml DELETED
@@ -1,19 +0,0 @@
1
- researcher:
2
- role: >
3
- {topic} Senior Data Researcher
4
- goal: >
5
- Uncover cutting-edge developments in {topic}
6
- backstory: >
7
- You're a seasoned researcher with a knack for uncovering the latest
8
- developments in {topic}. Known for your ability to find the most relevant
9
- information and present it in a clear and concise manner.
10
-
11
- reporting_analyst:
12
- role: >
13
- {topic} Reporting Analyst
14
- goal: >
15
- Create detailed reports based on {topic} data analysis and research findings
16
- backstory: >
17
- You're a meticulous analyst with a keen eye for detail. You're known for
18
- your ability to turn complex data into clear and concise reports, making
19
- it easy for others to understand and act on the information you provide.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
multi_crew/src/latest_ai_development/config/tasks.yaml DELETED
@@ -1,17 +0,0 @@
1
- research_task:
2
- description: >
3
- Conduct a thorough research about {topic}
4
- Make sure you find any interesting and relevant information given
5
- the current year is {current_year}.
6
- expected_output: >
7
- A list with 10 bullet points of the most relevant information about {topic}
8
- agent: researcher
9
-
10
- reporting_task:
11
- description: >
12
- Review the context you got and expand each topic into a full section for a report.
13
- Make sure the report is detailed and contains any and all relevant information.
14
- expected_output: >
15
- A fully fledged report with the main topics, each with a full section of information.
16
- Formatted as markdown without '```'
17
- agent: reporting_analyst
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
multi_crew/src/latest_ai_development/crew.py DELETED
@@ -1,62 +0,0 @@
1
- from crewai import Agent, Crew, Process, Task
2
- from crewai.project import CrewBase, agent, crew, task
3
-
4
- # If you want to run a snippet of code before or after the crew starts,
5
- # you can use the @before_kickoff and @after_kickoff decorators
6
- # https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
7
-
8
- @CrewBase
9
- class LatestAiDevelopment():
10
- """LatestAiDevelopment crew"""
11
-
12
- # Learn more about YAML configuration files here:
13
- # Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
14
- # Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
15
- agents_config = 'config/agents.yaml'
16
- tasks_config = 'config/tasks.yaml'
17
-
18
- # If you would like to add tools to your agents, you can learn more about it here:
19
- # https://docs.crewai.com/concepts/agents#agent-tools
20
- @agent
21
- def researcher(self) -> Agent:
22
- return Agent(
23
- config=self.agents_config['researcher'],
24
- verbose=True
25
- )
26
-
27
- @agent
28
- def reporting_analyst(self) -> Agent:
29
- return Agent(
30
- config=self.agents_config['reporting_analyst'],
31
- verbose=True
32
- )
33
-
34
- # To learn more about structured task outputs,
35
- # task dependencies, and task callbacks, check out the documentation:
36
- # https://docs.crewai.com/concepts/tasks#overview-of-a-task
37
- @task
38
- def research_task(self) -> Task:
39
- return Task(
40
- config=self.tasks_config['research_task'],
41
- )
42
-
43
- @task
44
- def reporting_task(self) -> Task:
45
- return Task(
46
- config=self.tasks_config['reporting_task'],
47
- output_file='report.md'
48
- )
49
-
50
- @crew
51
- def crew(self) -> Crew:
52
- """Creates the LatestAiDevelopment crew"""
53
- # To learn how to add knowledge sources to your crew, check out the documentation:
54
- # https://docs.crewai.com/concepts/knowledge#what-is-knowledge
55
-
56
- return Crew(
57
- agents=self.agents, # Automatically created by the @agent decorator
58
- tasks=self.tasks, # Automatically created by the @task decorator
59
- process=Process.sequential,
60
- verbose=True,
61
- # process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
62
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
multi_crew/src/latest_ai_development/main.py DELETED
@@ -1,66 +0,0 @@
1
- #!/usr/bin/env python
2
- import sys
3
- import warnings
4
-
5
- from datetime import datetime
6
-
7
- from latest_ai_development.crew import LatestAiDevelopment
8
-
9
- warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
10
-
11
- # This main file is intended to be a way for you to run your
12
- # crew locally, so refrain from adding unnecessary logic into this file.
13
- # Replace with inputs you want to test with, it will automatically
14
- # interpolate any tasks and agents information
15
-
16
- def run():
17
- """
18
- Run the crew.
19
- """
20
- inputs = {
21
- 'topic': 'AI LLMs',
22
- 'current_year': str(datetime.now().year)
23
- }
24
-
25
- try:
26
- LatestAiDevelopment().crew().kickoff(inputs=inputs)
27
- except Exception as e:
28
- raise Exception(f"An error occurred while running the crew: {e}")
29
-
30
-
31
- def train():
32
- """
33
- Train the crew for a given number of iterations.
34
- """
35
- inputs = {
36
- "topic": "AI LLMs"
37
- }
38
- try:
39
- LatestAiDevelopment().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
40
-
41
- except Exception as e:
42
- raise Exception(f"An error occurred while training the crew: {e}")
43
-
44
- def replay():
45
- """
46
- Replay the crew execution from a specific task.
47
- """
48
- try:
49
- LatestAiDevelopment().crew().replay(task_id=sys.argv[1])
50
-
51
- except Exception as e:
52
- raise Exception(f"An error occurred while replaying the crew: {e}")
53
-
54
- def test():
55
- """
56
- Test the crew execution and returns the results.
57
- """
58
- inputs = {
59
- "topic": "AI LLMs",
60
- "current_year": str(datetime.now().year)
61
- }
62
- try:
63
- LatestAiDevelopment().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
64
-
65
- except Exception as e:
66
- raise Exception(f"An error occurred while testing the crew: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
multi_crew/src/latest_ai_development/tools/__init__.py DELETED
File without changes
multi_crew/src/latest_ai_development/tools/custom_tool.py DELETED
@@ -1,19 +0,0 @@
1
- from crewai.tools import BaseTool
2
- from typing import Type
3
- from pydantic import BaseModel, Field
4
-
5
-
6
- class MyCustomToolInput(BaseModel):
7
- """Input schema for MyCustomTool."""
8
- argument: str = Field(..., description="Description of the argument.")
9
-
10
- class MyCustomTool(BaseTool):
11
- name: str = "Name of my tool"
12
- description: str = (
13
- "Clear description for what this tool is useful for, your agent will need this information to use it."
14
- )
15
- args_schema: Type[BaseModel] = MyCustomToolInput
16
-
17
- def _run(self, argument: str) -> str:
18
- # Implementation goes here
19
- return "this is an example of a tool output, ignore it and move along."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
pdf_generator.py DELETED
@@ -1,16 +0,0 @@
1
- from reportlab.pdfgen import canvas
2
- import tempfile
3
-
4
- def generate_summary_pdf(candidates):
5
- """Generate a PDF report for shortlisted candidates."""
6
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
7
- c = canvas.Canvas(temp_file.name)
8
-
9
- c.drawString(100, 800, "Shortlisted Candidates")
10
- y = 780
11
- for candidate in candidates:
12
- y -= 20
13
- c.drawString(100, y, f"{candidate.name} - Score: {candidate.score}")
14
-
15
- c.save()
16
- return temp_file.name # Return path to the PDF
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
resume_parser.py DELETED
@@ -1,9 +0,0 @@
1
- import fitz # PyMuPDF
2
-
3
- def extract_text_from_pdf(pdf_file):
4
- """Extract text from a given PDF file."""
5
- text = ""
6
- with fitz.open(pdf_file) as doc:
7
- for page in doc:
8
- text += page.get_text()
9
- return text
 
 
 
 
 
 
 
 
 
 
utils.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fitz # PyMuPDF for PDF processing
2
+ import requests
3
+ import json
4
+ import re
5
+ from io import BytesIO
6
+ import supabase
7
+ from config import SUPABASE_URL, SUPABASE_KEY, HF_API_TOKEN, HF_API_URL, HF_HEADERS
8
+
9
+ def parse_resume(pdf_file):
10
+ """Extracts text from a resume PDF."""
11
+ doc = fitz.open(stream=pdf_file.read(), filetype="pdf")
12
+ text = "\n".join([page.get_text("text") for page in doc])
13
+ return text
14
+
15
+ def extract_email(resume_text):
16
+ """Extracts an email address from resume text."""
17
+ match = re.search(r"[\w\.-]+@[\w\.-]+", resume_text)
18
+ return match.group(0) if match else None
19
+
20
+ def score_candidate(resume_text, job_description):
21
+ """Sends resume and job description to Hugging Face for scoring."""
22
+ payload = {"inputs": f"Resume: {resume_text}\nJob Description: {job_description}"}
23
+ response = requests.post(HF_API_URL, headers=HF_HEADERS, data=json.dumps(payload))
24
+ return response.json().get("score", 0)
25
+ # Debugging: Print response
26
+ if response.status_code != 200:
27
+ print(f"Error: {response.status_code}, {response.text}") # Log any errors
28
+ return 0 # Return default score if API fails
29
+
30
+ try:
31
+ return response.json().get("score", 0)
32
+ except requests.exceptions.JSONDecodeError:
33
+ print("Failed to decode JSON response:", response.text) # Debugging output
34
+ return 0 # Return default score if JSON decoding fails
35
+
36
+ def store_in_supabase(resume_text, score, candidate_name):
37
+ """Stores resume data in Supabase."""
38
+ email = extract_email(resume_text)
39
+ data = {
40
+ "name": candidate_name,
41
+ "resume": resume_text,
42
+ "score": score,
43
+ "email": email
44
+ }
45
+ supabase.table("candidates").insert(data).execute()
46
+
47
+ def generate_pdf_report(shortlisted_candidates):
48
+ """Generates a PDF summary of shortlisted candidates."""
49
+ pdf = BytesIO()
50
+ doc = fitz.open()
51
+ for candidate in shortlisted_candidates:
52
+ page = doc.new_page()
53
+ page.insert_text((50, 50), f"Candidate: {candidate['name']}\nEmail: {candidate['email']}\nScore: {candidate['score']}\nSummary: {candidate['summary']}")
54
+ doc.save(pdf)
55
+ pdf.seek(0)
56
+ return pdf
57
+
58
+ def process_resumes(uploaded_files, job_description):
59
+ """Processes uploaded resumes and returns shortlisted candidates."""
60
+ candidates = []
61
+ for pdf_file in uploaded_files:
62
+ resume_text = parse_resume(pdf_file)
63
+ score = score_candidate(resume_text, job_description)
64
+ email = extract_email(resume_text)
65
+ candidates.append({
66
+ "name": pdf_file.name,
67
+ "resume": resume_text,
68
+ "score": score,
69
+ "email": email
70
+ })
71
+ store_in_supabase(resume_text, score, pdf_file.name, email)
72
+ return sorted(candidates, key=lambda x: x["score"], reverse=True)[:5]