import os
from openai import AzureOpenAI
import PyPDF2
import gradio as gr
import docx
import re
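# Resume assistant app: extracts text from uploaded resumes and job descriptions and
# uses Azure OpenAI chat completions behind a Gradio Blocks UI.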
class Resume_Overall:
def __init__(self):
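        # The Azure OpenAI client reads its credentials from the AZURE_OPENAI_KEY and
        # AZURE_OPENAI_ENDPOINT environment variables.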
        self.client = AzureOpenAI(
            api_key=os.getenv("AZURE_OPENAI_KEY"),
            api_version="2023-07-01-preview",
            azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        )
def extract_text_from_file(self,file_path):
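        # Return the plain text of a .pdf, .txt, or .docx file, chosen by its extension.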
# Get the file extension
file_extension = os.path.splitext(file_path)[1]
        if file_extension == '.pdf':
            with open(file_path, 'rb') as file:
                # Create a PDF reader object (PdfReader replaces the deprecated PdfFileReader API)
                reader = PyPDF2.PdfReader(file)
                # Loop through each page in the PDF and extract the text
                extracted_text = ""
                for page in reader.pages:
                    extracted_text += page.extract_text() or ""
                return extracted_text
elif file_extension == '.txt':
with open(file_path, 'r') as file:
# Just read the entire contents of the text file
return file.read()
elif file_extension == '.docx':
doc = docx.Document(file_path)
text = []
for paragraph in doc.paragraphs:
text.append(paragraph.text)
return '\n'.join(text)
else:
return "Unsupported file type"
def course_response(self,resume_path):
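        # Suggest up to five online courses (with website links) based on the uploaded resume.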
resume_path = resume_path.name
resume = self.extract_text_from_file(resume_path)
# Define the prompt or input for the model
conversation = [
{"role": "system", "content": "You are a Resume Assistant."},
{"role": "user", "content": f"""Analyze the resume to generate online courses with website links to improve skills following resume delimitted by triple backticks. Generate atmost five courses.
result format should be:
course:[course].
website link:[website link]
```{resume}```
"""}
]
        # Request a chat completion from the Azure OpenAI deployment
        chat_completion = self.client.chat.completions.create(
            model="GPT-3",  # Azure OpenAI deployment name
            messages=conversation,
            max_tokens=200,
            temperature=0,
            n=1,
            stop=None,
        )
# Extract the generated text from the API response
generated_text = chat_completion.choices[0].message.content
return generated_text
def summary_response(self,resume_path):
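        # Write a short summary of the uploaded resume.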
resume_path = resume_path.name
resume = self.extract_text_from_file(resume_path)
# Define the prompt or input for the model
conversation = [
{"role": "system", "content": "You are a Resume Summarizer."},
{"role": "user", "content": f"""Analyze the resume to write the summary for following resume delimitted by triple backticks.
```{resume}```
"""}
]
        # Request a chat completion from the Azure OpenAI deployment
        chat_completion = self.client.chat.completions.create(
            model="GPT-3",  # Azure OpenAI deployment name
            messages=conversation,
            max_tokens=200,
            temperature=0,
            n=1,
            stop=None,
        )
# Extract the generated text from the API response
generated_text = chat_completion.choices[0].message.content
return generated_text
    def skill_response(self,resume_path):
        # List the skills found in the resume and identify any education gaps.
        resume_path = resume_path.name
        resume = self.extract_text_from_file(resume_path)
# Define the prompt or input for the model
conversation = [
{"role": "system", "content": "You are a Resume Assistant."},
{"role": "user", "content": f"""Find Education Gaps in given resume. Find Skills in resume.
```{resume}```
"""}
]
        # Request a chat completion from the Azure OpenAI deployment
        chat_completion = self.client.chat.completions.create(
            model="GPT-3",          # Azure OpenAI deployment name
            messages=conversation,
            max_tokens=100,         # Maximum number of tokens in the generated response
            temperature=0,          # 0 keeps the output focused and deterministic
            n=1,                    # Generate a single response
            stop=None,              # No explicit stop sequence
        )
# Extract the generated text from the API response
generated_text = chat_completion.choices[0].message.content
return generated_text
def _generate_job_list(self, resume: str) -> str:
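        # Ask the model to suggest suitable job roles for the given resume text.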
conversation = [
{"role": "system", "content": "You are a Resume Assistant."},
{"role": "user", "content": f"List out perfect job roles for based on resume informations:{resume}"}
]
        chat_completion = self.client.chat.completions.create(
            model="GPT-3",  # Azure OpenAI deployment name
            messages=conversation,
            max_tokens=100,
            temperature=0,
            n=1,
            stop=None,
        )
generated_text = chat_completion.choices[0].message.content
return generated_text
def job_list_interface(self, file) -> str:
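        # Gradio callback: extract text from the uploaded file and return suggested job roles.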
resume_text = self.extract_text_from_file(file.name)
job_list = self._generate_job_list(resume_text)
return job_list
def generate_job_description(self, role, experience):
        # Build the prompt that describes the required job-description sections
        conversation = [
            {"role": "system", "content": "You are a Resume Assistant."},
            {"role": "user", "content": f"""Your task is to generate a job description for the role of {role} with {experience} years of experience.
            The job description must include:
            1. Job Title
            2. Job Summary: [200 words]
            3. Responsibilities: five responsibilities, one per line
            4. Required Skills: six skills
            5. Qualifications
            All of these sections must appear in the generated job description.
            """}
        ]
        # Request a chat completion from the Azure OpenAI deployment
        chat_completion = self.client.chat.completions.create(
            model="GPT-3",          # Azure OpenAI deployment name
            messages=conversation,
            max_tokens=500,         # Maximum number of tokens in the generated response
            temperature=0.5,        # Slightly higher temperature for more varied wording
        )
# Extract the generated text from the API response
generated_text = chat_completion.choices[0].message.content
return generated_text
def response(self,job_description_path):
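        # Generate screening interview questions from the uploaded job description.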
job_description_path = job_description_path.name
job_description = self.extract_text_from_file(job_description_path)
# Define the prompt or input for the model
conversation = [
{"role": "system", "content": "You are a Resume Assistant."},
{"role": "user", "content": f"""Generate interview questions for screening following job_description delimitted by triple backticks. Generate atmost ten questions.
```{job_description}```
"""}
]
        # Request a chat completion from the Azure OpenAI deployment
        chat_completion = self.client.chat.completions.create(
            model="GPT-3",          # Azure OpenAI deployment name
            messages=conversation,
            max_tokens=200,         # Maximum number of tokens in the generated response
            temperature=0,          # 0 keeps the output focused and deterministic
            n=1,                    # Generate a single response
            stop=None,              # No explicit stop sequence
        )
# Extract the generated text from the API response
generated_text = chat_completion.choices[0].message.content
return generated_text
def show_file(self,file_path):
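        # Gradio callback: show the name of the uploaded file in the file display box.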
return file_path.name
def launch_gradio_interface(self, share: bool = True):
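        # Build a two-tab Gradio Blocks UI: resume tools (designations, summary, skills and
        # education gaps, course suggestions) and job-description tools (screening questions,
        # JD generation), then launch it.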
with gr.Blocks(css="style.css",theme='freddyaboulton/test-blue') as app:
with gr.Tab("Resume"):
with gr.Row(elem_id="col-container"):
with gr.Column(scale=0.70):
file_output = gr.File(elem_classes="filenameshow")
with gr.Column(scale=0.30):
upload_button = gr.UploadButton(
"Browse File",file_types=[".txt", ".pdf", ".doc", ".docx",".json",".csv"],
elem_classes="uploadbutton",scale=0.60)
with gr.TabItem("Designation"):
with gr.Column(elem_id = "col-container",scale=0.80):
btn = gr.Button(value="Submit")
output_text = gr.Textbox(label="Designation List",lines=8)
with gr.TabItem("Summarized"):
with gr.Column(elem_id = "col-container",scale=0.80):
analyse = gr.Button("Analyze")
summary_result = gr.Textbox(label="Summarized",lines=8)
with gr.TabItem("Skills and Education Gaps"):
with gr.Column(elem_id = "col-container",scale=0.80):
analyse_resume = gr.Button("Analyze Resume")
result = gr.Textbox(label="Skills and Education Gaps",lines=8)
with gr.TabItem("Course"):
with gr.Column(elem_id = "col-container",scale=0.80):
course_analyse = gr.Button("Find Courses")
course_result = gr.Textbox(label="Suggested Cources",lines=8)
upload_button.upload(self.show_file,upload_button,file_output)
course_analyse.click(self.course_response, [upload_button], course_result)
analyse_resume.click(self.skill_response, [upload_button], result)
btn.click(self.job_list_interface, upload_button, output_text)
analyse.click(self.summary_response, [upload_button], summary_result)
with gr.Tab("Job Description"):
with gr.Row(elem_id="col-container"):
with gr.Column(scale=0.70):
file_output1 = gr.File(elem_classes="filenameshow")
with gr.Column(scale=0.30):
upload_button1 = gr.UploadButton(
"Browse File",file_types=[".txt", ".pdf", ".doc", ".docx",".json",".csv"],
elem_classes="uploadbutton")
with gr.TabItem("Screening Question"):
with gr.Row(elem_id="col-container"):
jd_btn = gr.Button(value="Submit")
with gr.Row(elem_id="col-container"):
output_text1 = gr.Textbox(label="Screening Question")
with gr.TabItem("Generate JD"):
with gr.Row(elem_id="col-container"):
with gr.Column(scale=0.50):
rolls = gr.Textbox(label="Role",scale=0.60)
with gr.Column(scale=0.50):
experience = gr.Textbox(label="Experience",scale=0.60)
with gr.Row(elem_id="col-container"):
get_jd_btn = gr.Button("Generate JD")
with gr.Row(elem_id="col-container"):
result_jd = gr.Textbox(label="Job Description",lines=8)
get_jd_btn.click(self.generate_job_description, [rolls,experience], result_jd)
upload_button1.upload(self.show_file,upload_button1,file_output1)
jd_btn.click(self.response,[upload_button1], output_text1)
app.launch(debug=True)
if __name__ == "__main__":
resume_overall = Resume_Overall()
resume_overall.launch_gradio_interface()
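# Illustrative note (not part of the original flow): the class can also be used without the
# Gradio UI, for example:
#   overall = Resume_Overall()
#   text = overall.extract_text_from_file("resume.pdf")  # hypothetical local file path
#   print(overall._generate_job_list(text))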