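"""Gradio app that generates interview screening questions from an uploaded
job description (PDF, TXT, or DOCX) using the OpenAI Completion API."""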
import os
import openai
import PyPDF2
import gradio as gr
import docx
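# Assumed dependencies: gradio, PyPDF2, python-docx (the `docx` module above),
# and a pre-1.0 OpenAI SDK, which still exposes the openai.Completion endpoint.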
class QuestionsGenerator:
    def __init__(self):
        openai.api_key = os.getenv("OPENAI_API_KEY")
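        # Note: if OPENAI_API_KEY is unset, os.getenv returns None and the
        # completion request in response() fails with an authentication error.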
    def extract_text_from_file(self, file_path):
        # Get the file extension
        file_extension = os.path.splitext(file_path)[1]
        if file_extension == '.pdf':
            with open(file_path, 'rb') as file:
                # Create a PDF reader object (PyPDF2 3.x API; the older
                # PdfFileReader/getNumPages/extractText names are deprecated)
                reader = PyPDF2.PdfReader(file)
                # Create an empty string to hold the extracted text
                extracted_text = ""
                # Loop through each page in the PDF and extract the text
                for page in reader.pages:
                    extracted_text += page.extract_text() or ""
            return extracted_text
        elif file_extension == '.txt':
            with open(file_path, 'r') as file:
                # Just read the entire contents of the text file
                return file.read()
        elif file_extension == '.docx':
            doc = docx.Document(file_path)
            text = []
            for paragraph in doc.paragraphs:
                text.append(paragraph.text)
            return '\n'.join(text)
        else:
            return "Unsupported file type"
    def response(self, job_description_path):
        # Gradio passes the uploaded file as a file object; .name is its temp path
        job_description_path = job_description_path.name
        job_description = self.extract_text_from_file(job_description_path)
        # Define the prompt or input for the model
prompt = f"""Generate interview questions for screening following job_description delimitted by triple backticks. Generate atmost ten questions.
```{job_description}```
"""
        # Generate a response from the GPT-3 model
        response = openai.Completion.create(
            engine='text-davinci-003',  # Choose the GPT-3 engine you want to use
            prompt=prompt,
            max_tokens=200,  # Set the maximum number of tokens in the generated response
            temperature=0,  # Controls the randomness of the output. Higher values = more random, lower values = more focused
            n=1,  # Generate a single response
            stop=None,  # Specify an optional stop sequence to limit the length of the response
        )
        # Extract the generated text from the API response
        generated_text = response.choices[0].text.strip()
        return generated_text
    def gradio_interface(self):
        with gr.Blocks(css="style.css", theme='karthikeyan-adople/hudsonhayes-gray') as app:
            gr.HTML("""<center class="darkblue" style='background-color:rgb(0,1,36); text-align:center;padding:30px;'><center>
            <img class="leftimage" align="left" src="https://companieslogo.com/img/orig/RAND.AS_BIG-0f1935a4.png?t=1651813778" alt="Image" width="210" height="210">
            <h1 class="center" style="color:#fff">ADOPLE AI</h1></center>
            <br><center><h1 style="color:#fff">Questions Generator for Screening</h1></center>""")
            with gr.Row(elem_id="col-container"):
                with gr.Column():
                    jobDescription = gr.File(label="Job Description", elem_classes="heightfit")
                with gr.Column():
                    analyse = gr.Button("Generate")
                with gr.Column():
                    result = gr.Textbox(label="Questions For Screening", lines=8)
            analyse.click(self.response, [jobDescription], result)
        app.launch()
ques = QuestionsGenerator()
ques.gradio_interface()