robertselvam committed on
Commit
46cc7da
·
verified ·
1 Parent(s): 609094b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +122 -80
app.py CHANGED
@@ -6,84 +6,126 @@ from langchain_openai import AzureChatOpenAI
6
  from pypdf import PdfReader
7
  import os
8
  import gradio as gr
9
# Shared Azure OpenAI chat client used by every interview turn.
# The "GPT-3" deployment must exist in the configured Azure resource;
# credentials/endpoint are read from the environment by AzureChatOpenAI.
chat = AzureChatOpenAI(azure_deployment = "GPT-3")
12
def extract_text(pdf_path):
    """Return the concatenated text of every page of the PDF at *pdf_path*.

    Args:
        pdf_path: Filesystem path to a PDF file.

    Returns:
        A single string with all extractable page text; empty string when
        the document has no extractable text.
    """
    # creating a pdf reader object
    reader = PdfReader(pdf_path)
    # page.extract_text() can return None for image-only pages; guard with
    # `or ""` so concatenation never raises, and join once instead of
    # building the string with quadratic `+=`.
    return "".join((page.extract_text() or "") for page in reader.pages)
21
def get_response( candidate, chat_history, resume, jd):
    """Run one interview turn: extract the resume and job description,
    invoke the Azure chat model, and append the exchange to the history.

    Args:
        candidate: The candidate's latest answer (plain text).
        chat_history: List of (candidate, interviewer) tuples; mutated in place.
        resume: Uploaded resume file object (``.name`` is its path).
        jd: Uploaded job-description file object (``.name`` is its path).

    Returns:
        ("", chat_history) — the empty string clears the input textbox.
    """

    # Replace the file handles with their extracted text contents.
    resume = extract_text(resume.name)
    jd = extract_text(jd.name)

    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                """Your Task is Perform as intelligent interviewer, Your Task is ask question to the resume's candidate by following candidate Answer.
            at the end exit with greeting to the candidate.
            **Ask question follow up on the candidate response. get chat history.**
            """,
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

    chain = prompt | chat

    # chat_histroy_prompt = chat_history

    # NOTE(review): prior turns are injected as a raw history string inside
    # an AIMessage rather than replayed as individual messages — confirm
    # this is the intended prompting strategy.
    answer = chain.invoke(
        {
            "messages": [
                HumanMessage(
                    content=f" job description :{jd}\n Resume :{resume}"
                ),
                AIMessage(content=f"""Perform as intelligent interviewer, Your Task is ask question to the resume's candidate by following candidate Answer.
        chat history : {chat_history}"""),
                HumanMessage(content=candidate),
            ],
        }
    )
    # print("INTERVIEWER :", answer.content)
    # chat_history.append({"candidate":candidate,"interviewer":answer.content })

    result = answer.content
    chat_history.append((candidate, result))
    print("chat_history", chat_history)
    return "", chat_history
63
def gradio_interface() -> None:
    """Create a Gradio interface for the chatbot."""
    with gr.Blocks(css = "style.css" ,theme="HaleyCH/HaleyCH_Theme") as demo:

        # Page banner.
        gr.HTML("""<center class="darkblue" text-align:center;padding:30px;'><center>
        <center><h1 class ="center" style="color:#fff">ADOPLE AI</h1></center>
        <br><center><h1 style="color:#fff">Screening Assistant Chatbot</h1></center>""")

        # Conversation display.
        chatbot = gr.Chatbot()

        with gr.Row():
            with gr.Column(scale=1):
                msg = gr.Textbox(label="Question")

        with gr.Row():
            with gr.Column(scale=0.15):
                resume = gr.File(label="Resume")
            with gr.Column(scale=0.15):
                jd = gr.File(label="Job Description")
            with gr.Column(scale=0.85):
                clear = gr.ClearButton([msg, chatbot])

        # Submitting the textbox runs one interview turn; the empty string
        # returned by get_response clears the textbox.
        msg.submit(get_response, [msg, chatbot, resume, jd], [msg, chatbot])

    demo.launch(debug =True, share=True)

# NOTE(review): launches the app at import time — consider guarding with
# `if __name__ == "__main__":` so the module can be imported safely.
gradio_interface()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  from pypdf import PdfReader
7
  import os
8
  import gradio as gr
9
+ from openai import OpenAI
10
+ from gtts import gTTS
11
+ from IPython.display import Audio
12
+ import requests
13
+
14
+
15
# Hugging Face Inference API configuration for speech-to-text
# (Whisper large-v3). HF_TOKEN must be set in the environment; if it is
# missing, hf_token is None and the Authorization header will be invalid.
hf_token = os.getenv("HF_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
headers = {"Authorization": f"Bearer {hf_token}"}
19
class ScreeningAssistant:
    """Voice-driven screening-interview chatbot.

    Pipeline per turn: transcribe the candidate's spoken answer (Hugging
    Face Whisper inference API), ask a follow-up question with an Azure
    OpenAI chat model grounded in the uploaded resume and job description,
    then synthesize the reply to speech (gTTS) for autoplay in the UI.
    """

    def __init__(self):
        # Azure chat client; the "GPT4" deployment must exist in the
        # configured Azure resource (credentials come from the environment).
        self.chat = AzureChatOpenAI(azure_deployment = "GPT4")
        # self.client = OpenAI()

    def extract_text(self, pdf_path):
        """Return all extractable text of the PDF at *pdf_path* as one string."""
        # creating a pdf reader object
        reader = PdfReader(pdf_path)
        # page.extract_text() can return None for image-only pages; guard
        # with `or ""` so concatenation never raises, and join once instead
        # of building the string with quadratic `+=`.
        return "".join((page.extract_text() or "") for page in reader.pages)

    def audio_to_text(self, audio_path):
        """Transcribe the audio file at *audio_path* via the Whisper API.

        Returns:
            The transcription string from the API response.

        Raises:
            requests.HTTPError: if the inference API returns an error status.
            KeyError: if the response JSON has no "text" field.
        """
        with open(audio_path, "rb") as f:
            data = f.read()
        response = requests.post(API_URL, headers=headers, data=data)
        # Fail loudly on API errors (bad token, model loading) instead of
        # dying with an opaque KeyError on the error payload.
        response.raise_for_status()
        # Parse the JSON once rather than re-parsing for print and return.
        payload = response.json()
        print(payload['text'])
        return payload['text']

    def text_to_audio(self, mytext):
        """Synthesize *mytext* to speech; return the path of the saved mp3."""
        # Language in which you want to convert
        language = 'en'
        # slow=False selects the normal (faster) speaking rate.
        myobj = gTTS(text=mytext, lang=language, slow=False)

        # Saving the converted audio in a mp3 file named welcome.
        # NOTE(review): fixed filename — concurrent sessions overwrite each
        # other's audio; consider a per-session temp file.
        audio_path = "welcome.mp3"
        myobj.save(audio_path)
        return audio_path

    def get_response(self, audio_path, chat_history, resume, jd):
        """Run one interview turn end-to-end.

        Args:
            audio_path: Path to the candidate's recorded answer.
            chat_history: List of (candidate, interviewer) tuples; mutated
                in place.
            resume: Uploaded resume file object (``.name`` is its path).
            jd: Uploaded job-description file object (``.name`` is its path).

        Returns:
            ("", chat_history, audio_output): the empty string clears the
            textbox; audio_output is the spoken reply's file path.
        """
        candidate = self.audio_to_text(audio_path)
        resume = self.extract_text(resume.name)
        jd = self.extract_text(jd.name)

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    """Your Task is Perform as intelligent interviewer, Your Task is ask question to the resume's candidate by following candidate Answer.
                at the end exit with greeting to the candidate.
                **Ask question follow up on the candidate response. get chat history.**
                """,
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        chain = prompt | self.chat

        # NOTE(review): prior turns are injected as a raw history string
        # inside an AIMessage rather than replayed as individual messages —
        # confirm this is the intended prompting strategy.
        answer = chain.invoke(
            {
                "messages": [
                    HumanMessage(
                        content=f" job description :{jd}\n Resume :{resume}"
                    ),
                    AIMessage(content=f"""Perform as intelligent interviewer, Your Task is ask question to the resume's candidate by following candidate Answer.
            chat history : {chat_history}"""),
                    HumanMessage(content=candidate),
                ],
            }
        )

        result = answer.content
        chat_history.append((candidate, result))
        print("chat_history", chat_history)
        audio_output = self.text_to_audio(result)
        return "", chat_history, audio_output

    def gradio_interface(self) -> None:
        """Create and launch the Gradio interface for the chatbot."""
        with gr.Blocks(css = "style.css" ,theme="HaleyCH/HaleyCH_Theme") as demo:

            # Page banner.
            gr.HTML("""<center class="darkblue" text-align:center;padding:30px;'><center>
            <center><h1 class ="center" style="color:#fff">ADOPLE AI</h1></center>
            <br><center><h1 style="color:#fff">Screening Assistant Chatbot</h1></center>""")

            # Conversation display.
            chatbot = gr.Chatbot()

            with gr.Row():
                with gr.Column(scale=1):
                    msg = gr.Textbox(label="Question", show_label=False)
            with gr.Row():
                with gr.Column(scale=0.50):
                    # Microphone input; type="filepath" hands get_response a
                    # path on disk suitable for the Whisper API upload.
                    audio_path = gr.Audio(sources=["microphone"], type="filepath")
                with gr.Column(scale=0.50):
                    # Autoplays the synthesized interviewer reply.
                    play_audio = gr.Audio( value=None, autoplay=True)
            with gr.Row():
                with gr.Column(scale=0.25):
                    resume = gr.File(label="Resume")
                with gr.Column(scale=0.25):
                    jd = gr.File(label="Job Description")
                with gr.Column(scale=0.50):
                    clear = gr.ClearButton([chatbot])

            # When the candidate stops recording, run one interview turn.
            audio_path.stop_recording(self.get_response, [audio_path, chatbot, resume, jd], [msg, chatbot, play_audio])

        demo.launch(debug =True, share=True)
128
if __name__=="__main__":
    # Script entry point: construct the assistant and start the web UI.
    ScreeningAssistant().gradio_interface()