##############################
#### All library imports #####
##############################
import streamlit as st  # web app framework
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords  # the stop word list
from nltk.util import ngrams
from bs4 import BeautifulSoup
import re
from wordcloud import WordCloud
from collections import Counter  # word frequencies for the final word cloud
from cdifflib import CSequenceMatcher  # C implementation of difflib's SequenceMatcher
import PyPDF2
############# Web app streamlit page config ##########
st.set_page_config(
    page_title='Resume enhancement by extracting keywords using NLP',
    page_icon='📖',
    layout='wide'
)
st.title(" πŸ“• Resume enhancement by extracting keywords πŸ“ ")
st.subheader("πŸ“’ using NLP πŸ“’")
"""βœ… ** Downloading Models and Basic Setup **"""
#########################################
###### download NLTK models #############
#########################################
nltk.download("popular")  # includes punkt, wordnet, stopwords, etc.
nltk.download('stopwords')
lemmatizer = WordNetLemmatizer()
#########################################
###### read main resources files ########
#########################################
df = pd.read_csv('Resume_skills.csv')
df = df.drop(columns=['Unnamed: 0'])  # drop the stray index column saved with the csv
with open('linkedin_skill', 'r') as file1:
    skills = [line.strip().split(',')[0].lower() for line in file1]
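# 'skills' is the lookup database used by extract_skills / extract_skills2 below:
# the first comma-separated field of each line, lowercased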
def sentence_maker(unique_words):
    sentences = ''
    for i in unique_words:
        sentences += i.strip() + ' '
    return sentences
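# e.g. sentence_maker(['python ', 'sql']) -> 'python sql '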
#stop_words = set(nltk.corpus.stopwords.words('english'))
def extract_skills(input_text):
    # input_text: a list of token lists; returns one set of matched skills per list
    res = []
    for i in input_text:
        # generate bigrams and trigrams (to catch multi-word skills such as "artificial intelligence")
        bigrams_trigrams = list(map(' '.join, nltk.everygrams(i, 2, 3)))
        # we create a set to keep the results in
        found_skills = set()
        # we search for each token in our skills database
        for token in i:
            if token.lower() in skills:
                found_skills.add(token)
        # we search for each bigram and trigram in our skills database
        for ngram in bigrams_trigrams:
            if ngram.lower() in skills:
                found_skills.add(ngram)
        res.append(found_skills)
    return res
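# extract_skills returns one set per tokenized document, e.g. [{'python', 'machine learning'}, ...]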
def clean_sentences(df, col_name):
    reviews = []
    for sent in df[col_name]:
        # remove html content
        review_text = BeautifulSoup(sent, "html.parser").get_text()
        # remove non-alphabetic characters
        review_text = re.sub("[^a-zA-Z]", " ", review_text)
        # tokenize the sentences
        words = word_tokenize(review_text.lower())
        # remove stop words
        stops = set(stopwords.words("english"))
        meaningful_words = [w for w in words if w not in stops]
        reviews.append(meaningful_words)
    return reviews
def clean_sentences2(df, col_name):
    # same as clean_sentences, but keeps stop words
    reviews = []
    for sent in df[col_name]:
        # remove html content
        review_text = BeautifulSoup(sent, "html.parser").get_text()
        # remove non-alphabetic characters
        review_text = re.sub("[^a-zA-Z]", " ", review_text)
        # tokenize the sentences
        words = word_tokenize(review_text.lower())
        reviews.append(words)
    return reviews
def extract_keywords(res):
    # flatten a list of skill sets into one set of unique keywords
    keywords = set()
    for i in res:
        for j in i:
            keywords.add(j)
    return keywords
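# e.g. extract_keywords([{'python'}, {'sql', 'python'}]) -> {'python', 'sql'}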
def clean_sentences3(text):
    # same cleaning pipeline, but for a single raw string
    reviews = []
    # remove html content
    review_text = BeautifulSoup(text, "html.parser").get_text()
    # remove non-alphabetic characters
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    # tokenize the sentences
    words = word_tokenize(review_text.lower())
    # remove stop words
    stops = set(stopwords.words("english"))
    meaningful_words = [w for w in words if w not in stops]
    reviews.append(meaningful_words)
    return reviews
def decode_txt1(file_name):  # for clean text
    with open(file_name, "r") as f:
        full_text = f.read()
    return clean_sentences3(full_text)
def decode_pdf(filename):
    # extract the text of every pdf page into Sample.txt, then clean it
    # (PdfReader/pages/extract_text is the current PyPDF2 api; the old
    #  PdfFileReader/getPage/extractText names were removed in PyPDF2 3.0)
    with open(filename, 'rb') as pdfFileObj:
        pdfReader1 = PyPDF2.PdfReader(pdfFileObj)
        with open('Sample.txt', 'w') as text:
            for page in pdfReader1.pages:
                text.write(page.extract_text() or '')  # image-only pages may yield no text
    return decode_txt1('Sample.txt')
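# note: decode_pdf returns a one-element list (clean_sentences3 wraps its token
# list in another list), so callers index it as resume_text[0]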
def extract_skills2(input_text):
    # input_text: a single list of resume tokens; returns the matched skills as a list
    found_skills = []
    # generate bigrams and trigrams over the whole token list
    # (to catch multi-word skills such as "artificial intelligence")
    bigrams_trigrams = list(map(' '.join, nltk.everygrams(input_text, 2, 3)))
    # we search for each token in our skills database
    for token in input_text:
        if token.lower() in skills:
            found_skills.append(token)
    # we search for each bigram and trigram in our skills database
    for ngram in bigrams_trigrams:
        if ngram.lower() in skills:
            found_skills.append(ngram)
    return found_skills
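# e.g. extract_skills2(['knows', 'machine', 'learning']) -> ['machine learning'],
# provided 'machine learning' appears in the linkedin skills list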
#########################################
############# Upload your resume ############
#########################################
uploaded_file = st.file_uploader('Choose your .pdf file', type="pdf")
if uploaded_file is not None:
    # persist the uploaded bytes to disk so PyPDF2 can read them back
    with open("input.pdf", "wb") as f:
        f.write(uploaded_file.read())
    resume_text = decode_pdf("input.pdf")
    #########################################
    ####### select the category #############
    #########################################
    list_of_cats = [
        'Testing', 'HR', 'DESIGNER', 'INFORMATION-TECHNOLOGY', 'TEACHER', 'ADVOCATE',
        'BUSINESS-DEVELOPMENT', 'HEALTHCARE', 'FITNESS', 'AGRICULTURE', 'BPO', 'SALES',
        'CONSULTANT', 'DIGITAL-MEDIA', 'AUTOMOBILE', 'CHEF', 'FINANCE', 'APPAREL',
        'ENGINEERING', 'ACCOUNTANT', 'CONSTRUCTION', 'PUBLIC-RELATIONS', 'BANKING',
        'ARTS', 'AVIATION', 'Data Science', 'Advocate', 'Arts', 'Web Designing',
        'Mechanical Engineer', 'Sales', 'Health and fitness', 'Civil Engineer',
        'Java Developer', 'Business Analyst', 'SAP Developer', 'Automation Testing',
        'Electrical Engineering', 'Operations Manager', 'Python Developer',
        'DevOps Engineer', 'Network Security Engineer', 'PMO', 'Database', 'Hadoop',
        'ETL Developer', 'DotNet Developer', 'Blockchain'
    ]
    cat = st.selectbox("Select your desired Category", list_of_cats, index=0)
#cat = "Testing" #@param ['HR', 'DESIGNER', 'INFORMATION-TECHNOLOGY', 'TEACHER', 'ADVOCATE','BUSINESS-DEVELOPMENT', 'HEALTHCARE', 'FITNESS', 'AGRICULTURE','BPO', 'SALES', 'CONSULTANT', 'DIGITAL-MEDIA', 'AUTOMOBILE','CHEF', 'FINANCE', 'APPAREL', 'ENGINEERING', 'ACCOUNTANT','CONSTRUCTION', 'PUBLIC-RELATIONS', 'BANKING', 'ARTS', 'AVIATION','Data Science', 'Advocate', 'Arts', 'Web Designing','Mechanical Engineer', 'Sales', 'Health and fitness','Civil Engineer', 'Java Developer', 'Business Analyst','SAP Developer', 'Automation Testing', 'Electrical Engineering','Operations Manager', 'Python Developer', 'DevOps Engineer','Network Security Engineer', 'PMO', 'Database', 'Hadoop','ETL Developer', 'DotNet Developer', 'Blockchain', 'Testing'] {allow-input: true}
    print('You selected:', cat)
    sub_df = df[df['Category'] == cat]
    sentences1 = sentence_maker(sub_df['Resume_skills'])  # all skills listed for this category
"""βœ… **Extracting Data from PDF **"""
resume_text2=extract_skills2(resume_text[0])
resume_keywords=set(resume_text2)
# resume_keywords # keywords for existing resume
print(resume_keywords)
    wc = WordCloud(width=500, height=500, include_numbers=True, collocations=True,
                   background_color='white', min_font_size=10).generate(sentence_maker(resume_keywords))
    plt.figure(figsize=(10, 10))
    plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")
    plt.title('Existing Keywords')
    plt.show()  # no-op inside streamlit; useful only when running the script standalone
    # sub_unique_words (below) holds the category-related keywords; resume_keywords holds the resume's existing ones
"""βœ… Generating ***Similarity Score*** with existing skillset"""
from cdifflib import CSequenceMatcher
def get_similarity_score(s1,s2):
sm= CSequenceMatcher(None,s1,s2)
return(str(round(sm.ratio()*100,3))+'%')
#return round(sm.ratio()*100,3)
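    # SequenceMatcher.ratio() returns 2*M/T, where T is the total number of elements in
    # both sequences and M the number of matches; 100% therefore means identical keyword lists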
    wc_r = WordCloud(width=500, height=500, max_words=200, include_numbers=True, collocations=True,
                     background_color='white', min_font_size=10).generate(sentences1)
    # plt.figure(figsize=(10,10))
    # plt.imshow(wc_r, interpolation='bilinear')
    # plt.axis("off")
    # plt.title('Keywords for : ' + cat)
"""βœ… **Getting the matching score with database**"""
sub_unique_words=list(wc_r.words_.keys())
resume_keywords=list(resume_keywords)
bigram = list(map(' '.join,ngrams(sub_unique_words, 1)))
# print(bigram)
sub_keywords=set()
for bg in bigram:
if bg in skills:
# print(bg)
sub_keywords.add(bg)
tokens = nltk.word_tokenize(sentence_maker(sub_unique_words))
for i in tokens:
sub_keywords.add(i)
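    # sub_keywords now mixes whole wordcloud entries that are known skills
    # with every individual token from the category keywords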
    def preprocess(words):
        # reduce a keyword list to a set of cleaned single-word tokens
        res = set()
        for i in words:
            # remove html content
            review_text = BeautifulSoup(i, "html.parser").get_text()
            # remove non-alphabetic characters
            review_text = re.sub("[^a-zA-Z]", " ", review_text)
            # tokenize the cleaned keyword
            toks = word_tokenize(review_text.lower())
            for j in toks:
                res.add(j)
        return res
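    # both keyword sets are reduced to unigrams so that the set difference and the
    # sequence-matcher comparison below operate on comparable tokens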
    with st.spinner():
        sub_unique_words_match = list(preprocess(sub_unique_words))
        resume_keywords = list(preprocess(resume_keywords))
        # category keywords the resume does not cover yet
        predicted_keywords_match = [i for i in sub_unique_words_match if i not in resume_keywords]
        pred_keywords = [i for i in sub_keywords if i not in resume_keywords]
        print(pred_keywords)
    ############################
    #### final word cloud ######
    ############################
    word_cloud_dict = Counter(pred_keywords)  # each keyword appears once, so all get equal weight
    wc = WordCloud(width=500, height=500, include_numbers=True, collocations=True,
                   background_color='white', min_font_size=10).generate_from_frequencies(word_cloud_dict)
    plt.figure(figsize=(10, 10))
    plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")
    plt.title('Predicted Keywords')
    # plt.show()
    wc.to_file('prediction.jpg')  # save the cloud so st.image can render it below
    st.markdown("# Output")
    col1, col2, col3, col4 = st.columns(4)
    with col2:
        st.markdown("### Predicted Keywords WordCloud")
        st.image('prediction.jpg')
    ############################
    #### similarity score ######
    ############################
    # matching score between the resume keywords and the category keywords
    existing_score = get_similarity_score(sub_unique_words_match, resume_keywords)
    # matching score between the predicted keywords and the category keywords
    predicted_result_score = get_similarity_score(predicted_keywords_match, sub_unique_words_match)
    with col1:
        st.markdown('### Existing Keywords :')
        st.metric(label='Score', value=existing_score)
    with col3:
        st.markdown(" ")
    with col4:
        st.markdown('### Predicted Keywords :')
        st.metric(label='Score', value=predicted_result_score)