!python -m pip install --upgrade pip
!pip install bitsandbytes accelerate transformers langchain pdfplumber gradio

import torch
import pdfplumber
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.prompts import PromptTemplate

# 4-bit NF4 quantization config (bitsandbytes) so the 7B model fits in limited GPU memory
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16
)

# Load the Zephyr-7B-alpha chat model with the 4-bit quantization config
model_id = "HuggingFaceH4/zephyr-7b-alpha"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=nf4_config,
    device_map="auto"
)

# Tie input/output embeddings where the model config calls for it
model.tie_weights()

# Text-generation pipeline; max_new_tokens caps the length of each generated answer
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
)

# Wrap the pipeline as a LangChain LLM so it can be called via .predict()
llm = HuggingFacePipeline(pipeline=pipe)

## LLM Response
def get_llm_response(prompt_text):
  # Simple helper that sends a prompt string to the wrapped LLM and returns its text
  res = llm.predict(prompt_text)
  return res

def input_pdf_text(uploaded_file):
  # pdfplumber accepts a path or a file-like object; concatenate the text of every page
  with pdfplumber.open(uploaded_file) as pdf:
    text = ""
    for page in pdf.pages:
      text += page.extract_text() or ""
    return text


def Get_Response(upload_pdf, jd):
  # Extract the resume text, build the evaluation prompt, and ask the LLM for a structured answer
  text = input_pdf_text(upload_pdf)
  prompt_template = PromptTemplate.from_template(
    """
    Act like a skilled and very experienced ATS (Applicant Tracking System)
    with a deep understanding of the tech field: software engineering, data science,
    data analysis, and big data engineering. Your task is to evaluate the resume against
    the given job description.
    Consider that the job market is very competitive, and provide the best possible
    assistance for improving the resume. Assign the percentage match based on the
    JD (Job Description) and list the missing keywords with high accuracy.
    resume: {text}
    description: {jd}

    I want the response as one single string with the structure
    {{"JD Match": "%", "MissingKeywords": [], "Profile Summary": ""}}
    """)
  prompt = prompt_template.format(text=text, jd=jd)
  response = llm.predict(prompt)
  return response
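
# Optional helper (an addition, not part of the original script): the prompt asks for a
# JSON-like string, so the answer can be parsed into a dict when the model returns just
# the requested JSON. A minimal sketch, assuming the model follows the requested format:
import json

def parse_ats_response(response):
  try:
    return json.loads(response)
  except (json.JSONDecodeError, TypeError):
    # Fall back to the raw text if the model strayed from the requested format
    return {"raw": response}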

# Define Gradio interface
interface = gr.Interface(
    fn=Get_Response,
    inputs=["file", "text"],
    # Equivalent explicit components:
    # inputs=[
    #     gr.File(label="Upload PDF"),
    #     gr.Textbox(label="Job Description"),
    # ],
    outputs="text",
    title="Get ATS-Style Resume Evaluation",
    description="Upload a resume PDF and provide a job description to get an evaluation with JD match percentage, missing keywords, and profile summary.",
)

# Launch the Gradio application; share=True prints a temporary public link, debug=True surfaces errors in the notebook output
interface.launch(debug=True, share=True)
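
# Example (hypothetical file path and job description) of calling the evaluation directly, without the UI:
# result = Get_Response("resume.pdf", "Looking for a data scientist with Python, SQL, and Spark experience.")
# print(result)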