# Screening Assistant Chatbot — Hugging Face Space application.
# (Non-Python page residue from the web scrape — space status, file size,
# commit hashes, and a line-number gutter — removed so the file parses.)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage
from langchain_core.messages import AIMessage
from langchain.memory import ChatMessageHistory
from langchain_openai import AzureChatOpenAI
from pypdf import PdfReader
import os
import gradio as gr
from openai import OpenAI
from gtts import gTTS
import requests
# Hugging Face inference-API credentials; HF_TOKEN must be set in the environment
# (on a Space, via a repository secret). If unset, hf_token is None and the
# Authorization header below becomes "Bearer None" — requests will then fail.
hf_token = os.getenv("HF_TOKEN")
# Hosted Whisper (medium, English-only) model used for speech-to-text.
API_URL = "https://api-inference.huggingface.co/models/openai/whisper-medium.en"
headers = {"Authorization": f"Bearer {hf_token}"}
class ScreeningAssistant:
    """Voice-driven resume-screening interviewer.

    Pipeline per turn: transcribe the candidate's spoken answer (Whisper via
    the HF inference API), ask a follow-up interview question with an Azure
    OpenAI chat model grounded in the uploaded resume and job description,
    then speak the model's reply with gTTS. The UI is built with Gradio.
    """

    def __init__(self):
        # Azure-hosted chat model; "GPT4" is the deployment name configured
        # in the Azure OpenAI resource (endpoint/key come from env vars).
        self.chat = AzureChatOpenAI(azure_deployment="GPT4")

    def extract_text(self, pdf_path):
        """Return the concatenated text of every page of the PDF at *pdf_path*.

        ``page.extract_text()`` returns ``None`` for pages with no extractable
        text (e.g. scanned images); those pages are skipped instead of raising
        a ``TypeError`` during concatenation.
        """
        reader = PdfReader(pdf_path)
        # join() is linear and the `or ""` guards against None pages.
        return "".join(page.extract_text() or "" for page in reader.pages)

    def audio_to_text(self, audio_path):
        """Transcribe the audio file at *audio_path* via the HF Whisper API.

        Raises ``requests.HTTPError`` when the inference API call fails and
        ``KeyError`` if the success payload carries no ``"text"`` field.
        """
        with open(audio_path, "rb") as f:
            data = f.read()
        response = requests.post(API_URL, headers=headers, data=data)
        # Surface HTTP failures explicitly instead of a confusing KeyError
        # on the error payload (e.g. model loading / auth errors).
        response.raise_for_status()
        payload = response.json()  # parse once, not once per access
        print(payload["text"])
        return payload["text"]

    def text_to_audio(self, mytext):
        """Synthesize *mytext* to speech and return the path of the MP3 file."""
        # slow=False selects the normal (faster) speaking rate.
        speech = gTTS(text=mytext, lang="en", slow=False)
        # NOTE(review): fixed output path — concurrent sessions overwrite
        # each other's audio; consider a per-session temp file.
        audio_path = "welcome.mp3"
        speech.save(audio_path)
        return audio_path

    def get_response(self, audio_path, chat_history, resume, jd):
        """Produce the next interview turn.

        Parameters
        ----------
        audio_path : str
            Path of the recorded candidate answer (Gradio mic widget).
        chat_history : list[tuple[str, str]]
            Gradio chatbot history; mutated in place with the new turn.
        resume, jd : file-like
            Uploaded resume / job-description files (``.name`` is the path).

        Returns
        -------
        tuple
            ``("", chat_history, audio_path)`` shaped for the Gradio outputs
            ``[msg, chatbot, play_audio]`` — the empty string clears the
            question textbox.
        """
        candidate = self.audio_to_text(audio_path)
        resume_text = self.extract_text(resume.name)
        jd_text = self.extract_text(jd.name)

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    """Your Task is Perform as intelligent interviewer, Your Task is ask question to the resume's candidate by following candidate Answer.
at the end exit with greeting to the candidate.
**Ask question follow up on the candidate response. get chat history.**
""",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )
        chain = prompt | self.chat
        answer = chain.invoke(
            {
                "messages": [
                    HumanMessage(
                        content=f" job description :{jd_text}\n Resume :{resume_text}"
                    ),
                    # Prior turns are injected inline as an AI message so the
                    # model can follow up on earlier answers.
                    AIMessage(content=f"""Perform as intelligent interviewer, Your Task is ask question to the resume's candidate by following candidate Answer.
chat history : {chat_history}"""),
                    HumanMessage(content=candidate),
                ],
            }
        )
        result = answer.content
        chat_history.append((candidate, result))
        print("chat_history", chat_history)
        audio_output = self.text_to_audio(result)
        return "", chat_history, audio_output

    def gradio_interface(self) -> None:
        """Create and launch the Gradio interface for the chatbot."""
        with gr.Blocks(css="style.css", theme="HaleyCH/HaleyCH_Theme") as demo:
            gr.HTML("""<center class="darkblue" text-align:center;padding:30px;'><center>
<center><h1 class ="center" style="color:#fff">ADOPLE AI</h1></center>
<br><center><h1 style="color:#fff">Screening Assistant Chatbot</h1></center>""")
            chatbot = gr.Chatbot()
            with gr.Row():
                with gr.Column(scale=1):
                    msg = gr.Textbox(label="Question", show_label=False)
            with gr.Row():
                with gr.Column(scale=0.50):
                    audio_path = gr.Audio(sources=["microphone"], type="filepath")
                with gr.Column(scale=0.50):
                    play_audio = gr.Audio(value=None, autoplay=True)
            with gr.Row():
                with gr.Column(scale=0.25):
                    resume = gr.File(label="Resume")
                with gr.Column(scale=0.25):
                    jd = gr.File(label="Job Description")
                with gr.Column(scale=0.50):
                    clear = gr.ClearButton([chatbot])
            # When the mic recording stops, run one full interview turn.
            audio_path.stop_recording(
                self.get_response,
                [audio_path, chatbot, resume, jd],
                [msg, chatbot, play_audio],
            )
        demo.launch()
# Entry point: build the assistant and launch the Gradio app.
# (A stray "|" table artifact at the end of the original last line — a
# web-scrape residue that broke parsing — has been removed.)
if __name__ == "__main__":
    assistant = ScreeningAssistant()
    assistant.gradio_interface()