import uuid

import streamlit as st

from utils import close_matches, create_docs, create_embeddings_load_data, get_summary
# Create session variables
if 'unique_id' not in st.session_state:
    st.session_state['unique_id'] = ''


def main():
    st.set_page_config(page_title="Resume Screening Assistance")
    st.title("HR - Resume Screening Assistance 💁")
    st.subheader("I can help you in the resume screening process")
    st.sidebar.title("📄")

    job_description = st.text_area("Please paste the 'JOB DESCRIPTION' here...", key="1")
    document_count = st.text_input("No. of 'RESUMES' to return", key="2")

    # Upload the resumes (PDF files only)
    pdf = st.file_uploader("Upload resumes here, only PDF files allowed", type=["pdf"], accept_multiple_files=True)

    submit = st.button("Help me with the analysis")

    if submit:
        with st.spinner('Wait for it...'):
            # Create a unique ID so we can query the Pinecone vector store
            # for only the documents this user uploaded
            st.session_state['unique_id'] = uuid.uuid4().hex

            # Build a documents list out of all the user-uploaded PDF files
            final_docs_list = create_docs(pdf, st.session_state['unique_id'])

            # Display the count of resumes that have been uploaded
            st.write("*Resumes uploaded* : " + str(len(final_docs_list)))

            # Create the embeddings instance
            embeddings = create_embeddings_load_data()

            # Fetch the documents most relevant to the job description from the vector store
            relevant_docs = close_matches(job_description, document_count, final_docs_list, embeddings)

            # Line separator
            st.write(":heavy_minus_sign:" * 30)

            # For each relevant document, display some of its info on the UI
            for item in range(len(relevant_docs)):
                st.subheader("👉 " + str(item + 1))

                # Display the file name
                st.write("**File** : " + relevant_docs[item][0].metadata['name'])

                # Expander with the match score and an LLM-generated summary
                with st.expander('Show me 👀'):
                    st.info("**Match Score** : " + str(1 - relevant_docs[item][1]))
                    # Summarize the current resume with the 'get_summary' helper,
                    # which uses an LLM and a LangChain summarize chain
                    summary = get_summary(relevant_docs[item][0])
                    st.write("**Summary** : " + summary)

        st.success("Hope I was able to save your time ❤️")


# Invoke the main function
if __name__ == '__main__':
    main()
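

# ---------------------------------------------------------------------------
# utils.py (hypothetical sketch). The helpers imported above are defined in a
# separate utils module that is not shown in this file. The code below is a
# minimal sketch of how such a module could look, not the original
# implementation: it swaps the Pinecone store referenced in the comments for
# an in-memory FAISS index built per request, and the embedding model, LLM,
# chunk handling, and score convention are assumptions for illustration only.
# ---------------------------------------------------------------------------

from pypdf import PdfReader

from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS


def create_docs(uploaded_files, unique_id):
    """Turn each uploaded PDF into a LangChain Document with text and metadata."""
    docs = []
    for uploaded_file in uploaded_files:
        reader = PdfReader(uploaded_file)
        text = "".join(page.extract_text() or "" for page in reader.pages)
        docs.append(
            Document(
                page_content=text,
                metadata={"name": uploaded_file.name, "unique_id": unique_id},
            )
        )
    return docs


def create_embeddings_load_data():
    """Load a sentence-transformers embedding model (assumed model name)."""
    return SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")


def close_matches(job_description, document_count, documents, embeddings):
    """Return the (document, distance) pairs closest to the job description."""
    index = FAISS.from_documents(documents, embeddings)
    # Lower distance = better match; the UI above shows 1 - distance as the score.
    return index.similarity_search_with_score(job_description, k=int(document_count))


def get_summary(current_doc):
    """Summarize one resume with an LLM via a LangChain summarize chain."""
    llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set in the environment
    chain = load_summarize_chain(llm, chain_type="map_reduce")
    return chain.run([current_doc])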