Akashmj22122002 committed on
Commit
e47fb69
·
verified ·
1 Parent(s): 4a2d140

Upload folder using huggingface_hub

Browse files
4_lab4.ipynb CHANGED
@@ -438,22 +438,7 @@
438
  "cell_type": "code",
439
  "execution_count": null,
440
  "metadata": {},
441
- "outputs": [
442
- {
443
- "ename": "FileNotFoundError",
444
- "evalue": "[Errno 2] No such file or directory: 'me/linkedin.pdf'",
445
- "output_type": "error",
446
- "traceback": [
447
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
448
- "\u001b[31mFileNotFoundError\u001b[39m Traceback (most recent call last)",
449
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[24]\u001b[39m\u001b[32m, line 163\u001b[39m\n\u001b[32m 159\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m response.choices[\u001b[32m0\u001b[39m].message.content\n\u001b[32m 162\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[34m__name__\u001b[39m == \u001b[33m\"\u001b[39m\u001b[33m__main__\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m--> \u001b[39m\u001b[32m163\u001b[39m me = \u001b[43mMe\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 164\u001b[39m gr.ChatInterface(me.chat, \u001b[38;5;28mtype\u001b[39m=\u001b[33m\"\u001b[39m\u001b[33mmessages\u001b[39m\u001b[33m\"\u001b[39m).launch()\n",
450
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[24]\u001b[39m\u001b[32m, line 93\u001b[39m, in \u001b[36mMe.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 90\u001b[39m \u001b[38;5;28mself\u001b[39m.openai = gemini \u001b[38;5;66;03m# REPLACED OpenAI WITH GEMINI\u001b[39;00m\n\u001b[32m 91\u001b[39m \u001b[38;5;28mself\u001b[39m.name = \u001b[33m\"\u001b[39m\u001b[33mEd Donner\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m---> \u001b[39m\u001b[32m93\u001b[39m reader = \u001b[43mPdfReader\u001b[49m\u001b[43m(\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mme/linkedin.pdf\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[32m 94\u001b[39m \u001b[38;5;28mself\u001b[39m.linkedin = \u001b[33m\"\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 95\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m page \u001b[38;5;129;01min\u001b[39;00m reader.pages:\n",
451
- "\u001b[36mFile \u001b[39m\u001b[32md:\\projectsUdemy\\agents\\.venv\\Lib\\site-packages\\pypdf\\_reader.py:131\u001b[39m, in \u001b[36mPdfReader.__init__\u001b[39m\u001b[34m(self, stream, strict, password)\u001b[39m\n\u001b[32m 127\u001b[39m \u001b[38;5;28mself\u001b[39m._page_id2num: Optional[\u001b[38;5;28mdict\u001b[39m[Any, Any]] = \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 129\u001b[39m \u001b[38;5;28mself\u001b[39m._validated_root: Optional[DictionaryObject] = \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m131\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_initialize_stream\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 132\u001b[39m \u001b[38;5;28mself\u001b[39m._known_objects: \u001b[38;5;28mset\u001b[39m[\u001b[38;5;28mtuple\u001b[39m[\u001b[38;5;28mint\u001b[39m, \u001b[38;5;28mint\u001b[39m]] = \u001b[38;5;28mset\u001b[39m()\n\u001b[32m 134\u001b[39m \u001b[38;5;28mself\u001b[39m._override_encryption = \u001b[38;5;28;01mFalse\u001b[39;00m\n",
452
- "\u001b[36mFile \u001b[39m\u001b[32md:\\projectsUdemy\\agents\\.venv\\Lib\\site-packages\\pypdf\\_reader.py:150\u001b[39m, in \u001b[36mPdfReader._initialize_stream\u001b[39m\u001b[34m(self, stream)\u001b[39m\n\u001b[32m 148\u001b[39m \u001b[38;5;28mself\u001b[39m._stream_opened = \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[32m 149\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(stream, (\u001b[38;5;28mstr\u001b[39m, Path)):\n\u001b[32m--> \u001b[39m\u001b[32m150\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mrb\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m fh:\n\u001b[32m 151\u001b[39m stream = BytesIO(fh.read())\n\u001b[32m 152\u001b[39m \u001b[38;5;28mself\u001b[39m._stream_opened = \u001b[38;5;28;01mTrue\u001b[39;00m\n",
453
- "\u001b[31mFileNotFoundError\u001b[39m: [Errno 2] No such file or directory: 'me/linkedin.pdf'"
454
- ]
455
- }
456
- ],
457
  "source": [
458
  "from dotenv import load_dotenv\n",
459
  "from openai import OpenAI\n",
@@ -618,7 +603,9 @@
618
  "\n",
619
  "if __name__ == \"__main__\":\n",
620
  " me = Me()\n",
621
- " gr.ChatInterface(me.chat, type=\"messages\").launch()\n"
 
 
622
  ]
623
  },
624
  {
 
438
  "cell_type": "code",
439
  "execution_count": null,
440
  "metadata": {},
441
+ "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
442
  "source": [
443
  "from dotenv import load_dotenv\n",
444
  "from openai import OpenAI\n",
 
603
  "\n",
604
  "if __name__ == \"__main__\":\n",
605
  " me = Me()\n",
606
+ " gr.ChatInterface(me.chat, type=\"messages\").launch()\n",
607
+ " # gr.ChatInterface(me.chat).launch()\n",
608
+ "\n"
609
  ]
610
  },
611
  {
app.py CHANGED
@@ -133,6 +133,176 @@
133
  # gr.ChatInterface(me.chat, type="messages").launch()
134
 
135
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  from dotenv import load_dotenv
137
  from openai import OpenAI
138
  import json
@@ -140,57 +310,68 @@ import os
140
  import requests
141
  from pypdf import PdfReader
142
  import gradio as gr
 
 
143
 
144
  load_dotenv(override=True)
145
 
 
146
  GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
147
  google_api_key = os.getenv("GOOGLE_API_KEY")
148
 
149
- # Initialize Gemini client
150
- gemini = OpenAI(
151
- base_url=GEMINI_BASE_URL,
152
- api_key=google_api_key
153
- )
154
 
 
155
  def push(text):
156
- requests.post(
157
- "https://api.pushover.net/1/messages.json",
158
- data={
159
- "token": os.getenv("PUSHOVER_TOKEN"),
160
- "user": os.getenv("PUSHOVER_USER"),
161
- "message": text,
162
- }
163
- )
164
-
 
 
 
 
165
 
 
166
  def record_user_details(email, name="Name not provided", notes="not provided"):
167
- push(f"Recording {name} with email {email} and notes {notes}")
168
- return {"recorded": "ok"}
169
-
170
 
171
  def record_unknown_question(question):
172
- push(f"Recording {question}")
173
- return {"recorded": "ok"}
174
-
175
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
176
  record_user_details_json = {
177
  "name": "record_user_details",
178
- "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
179
  "parameters": {
180
  "type": "object",
181
  "properties": {
182
- "email": {
183
- "type": "string",
184
- "description": "The email address of this user"
185
- },
186
- "name": {
187
- "type": "string",
188
- "description": "The user's name, if they provided it"
189
- },
190
- "notes": {
191
- "type": "string",
192
- "description": "Any additional information about the conversation that's worth recording to give context"
193
- }
194
  },
195
  "required": ["email"],
196
  "additionalProperties": False
@@ -199,48 +380,93 @@ record_user_details_json = {
199
 
200
  record_unknown_question_json = {
201
  "name": "record_unknown_question",
202
- "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
203
  "parameters": {
204
  "type": "object",
205
  "properties": {
206
- "question": {
207
- "type": "string",
208
- "description": "The question that couldn't be answered"
209
- },
210
  },
211
  "required": ["question"],
212
  "additionalProperties": False
213
  }
214
  }
215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  tools = [
217
  {"type": "function", "function": record_user_details_json},
218
- {"type": "function", "function": record_unknown_question_json}
 
219
  ]
220
 
221
-
222
  class Me:
223
-
224
  def __init__(self):
225
- self.openai = gemini # REPLACED OpenAI WITH GEMINI
226
  self.name = "AKASH M J"
227
 
228
- reader = PdfReader("me/Profile.pdf")
229
  self.linkedin = ""
230
- for page in reader.pages:
231
- text = page.extract_text()
232
- if text:
233
- self.linkedin += text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234
 
235
- with open("me/summary.txt", "r", encoding="utf-8") as f:
236
- self.summary = f.read()
 
 
 
 
 
 
 
 
237
 
238
  def handle_tool_call(self, tool_calls):
239
  results = []
240
  for tool_call in tool_calls:
241
  tool_name = tool_call.function.name
242
- arguments = json.loads(tool_call.function.arguments)
243
- print(f"Tool called: {tool_name}", flush=True)
 
 
 
244
  tool = globals().get(tool_name)
245
  result = tool(**arguments) if tool else {}
246
  results.append({
@@ -250,52 +476,94 @@ class Me:
250
  })
251
  return results
252
 
253
- def system_prompt(self):
254
- system_prompt = (
255
- f"You are acting as {self.name}. You are answering questions on {self.name}'s website, "
256
- f"particularly questions related to {self.name}'s career, background, skills and experience. "
257
- f"Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. "
258
- f"You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. "
259
- f"Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
260
- f"If you don't know the answer to any question, use your record_unknown_question tool to record the question. "
261
- f"If the user is engaging in discussion, try to steer them towards getting in touch via email."
262
- )
263
-
264
- system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
265
- system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
266
- return system_prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
 
268
  def chat(self, message, history):
269
- messages = [
270
- {"role": "system", "content": self.system_prompt()}
271
- ] + history + [
272
- {"role": "user", "content": message}
273
- ]
 
 
 
 
 
 
 
 
 
 
274
 
 
275
  done = False
276
  while not done:
277
- # ---- CHANGED TO USE GEMINI ----
278
  response = self.openai.chat.completions.create(
279
- model="gemini-2.5-flash",
280
  messages=messages,
281
  tools=tools
282
  )
283
- # --------------------------------
284
 
285
- if response.choices[0].finish_reason == "tool_calls":
286
- message = response.choices[0].message
287
- tool_calls = message.tool_calls
 
 
288
  results = self.handle_tool_call(tool_calls)
289
- messages.append(message)
290
  messages.extend(results)
 
291
  else:
292
  done = True
293
 
294
- return response.choices[0].message.content
 
 
 
 
 
 
 
 
295
 
 
296
 
 
297
  if __name__ == "__main__":
298
  me = Me()
299
- # gr.ChatInterface(me.chat, type="messages").launch()
300
- gr.ChatInterface(me.chat).launch()
301
-
 
133
  # gr.ChatInterface(me.chat, type="messages").launch()
134
 
135
 
136
+
137
+
138
+ # from dotenv import load_dotenv
139
+ # from openai import OpenAI
140
+ # import json
141
+ # import os
142
+ # import requests
143
+ # from pypdf import PdfReader
144
+ # import gradio as gr
145
+
146
+ # load_dotenv(override=True)
147
+
148
+ # GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
149
+ # google_api_key = os.getenv("GOOGLE_API_KEY")
150
+
151
+ # # Initialize Gemini client
152
+ # gemini = OpenAI(
153
+ # base_url=GEMINI_BASE_URL,
154
+ # api_key=google_api_key
155
+ # )
156
+
157
+ # def push(text):
158
+ # requests.post(
159
+ # "https://api.pushover.net/1/messages.json",
160
+ # data={
161
+ # "token": os.getenv("PUSHOVER_TOKEN"),
162
+ # "user": os.getenv("PUSHOVER_USER"),
163
+ # "message": text,
164
+ # }
165
+ # )
166
+
167
+
168
+ # def record_user_details(email, name="Name not provided", notes="not provided"):
169
+ # push(f"Recording {name} with email {email} and notes {notes}")
170
+ # return {"recorded": "ok"}
171
+
172
+
173
+ # def record_unknown_question(question):
174
+ # push(f"Recording {question}")
175
+ # return {"recorded": "ok"}
176
+
177
+
178
+ # record_user_details_json = {
179
+ # "name": "record_user_details",
180
+ # "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
181
+ # "parameters": {
182
+ # "type": "object",
183
+ # "properties": {
184
+ # "email": {
185
+ # "type": "string",
186
+ # "description": "The email address of this user"
187
+ # },
188
+ # "name": {
189
+ # "type": "string",
190
+ # "description": "The user's name, if they provided it"
191
+ # },
192
+ # "notes": {
193
+ # "type": "string",
194
+ # "description": "Any additional information about the conversation that's worth recording to give context"
195
+ # }
196
+ # },
197
+ # "required": ["email"],
198
+ # "additionalProperties": False
199
+ # }
200
+ # }
201
+
202
+ # record_unknown_question_json = {
203
+ # "name": "record_unknown_question",
204
+ # "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
205
+ # "parameters": {
206
+ # "type": "object",
207
+ # "properties": {
208
+ # "question": {
209
+ # "type": "string",
210
+ # "description": "The question that couldn't be answered"
211
+ # },
212
+ # },
213
+ # "required": ["question"],
214
+ # "additionalProperties": False
215
+ # }
216
+ # }
217
+
218
+ # tools = [
219
+ # {"type": "function", "function": record_user_details_json},
220
+ # {"type": "function", "function": record_unknown_question_json}
221
+ # ]
222
+
223
+
224
+ # class Me:
225
+
226
+ # def __init__(self):
227
+ # self.openai = gemini # REPLACED OpenAI WITH GEMINI
228
+ # self.name = "AKASH M J"
229
+
230
+ # reader = PdfReader("me/Profile.pdf")
231
+ # self.linkedin = ""
232
+ # for page in reader.pages:
233
+ # text = page.extract_text()
234
+ # if text:
235
+ # self.linkedin += text
236
+
237
+ # with open("me/summary.txt", "r", encoding="utf-8") as f:
238
+ # self.summary = f.read()
239
+
240
+ # def handle_tool_call(self, tool_calls):
241
+ # results = []
242
+ # for tool_call in tool_calls:
243
+ # tool_name = tool_call.function.name
244
+ # arguments = json.loads(tool_call.function.arguments)
245
+ # print(f"Tool called: {tool_name}", flush=True)
246
+ # tool = globals().get(tool_name)
247
+ # result = tool(**arguments) if tool else {}
248
+ # results.append({
249
+ # "role": "tool",
250
+ # "content": json.dumps(result),
251
+ # "tool_call_id": tool_call.id
252
+ # })
253
+ # return results
254
+
255
+ # def system_prompt(self):
256
+ # system_prompt = (
257
+ # f"You are acting as {self.name}. You are answering questions on {self.name}'s website, "
258
+ # f"particularly questions related to {self.name}'s career, background, skills and experience. "
259
+ # f"Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. "
260
+ # f"You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. "
261
+ # f"Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
262
+ # f"If you don't know the answer to any question, use your record_unknown_question tool to record the question. "
263
+ # f"If the user is engaging in discussion, try to steer them towards getting in touch via email."
264
+ # )
265
+
266
+ # system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
267
+ # system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
268
+ # return system_prompt
269
+
270
+ # def chat(self, message, history):
271
+ # messages = [
272
+ # {"role": "system", "content": self.system_prompt()}
273
+ # ] + history + [
274
+ # {"role": "user", "content": message}
275
+ # ]
276
+
277
+ # done = False
278
+ # while not done:
279
+ # # ---- CHANGED TO USE GEMINI ----
280
+ # response = self.openai.chat.completions.create(
281
+ # model="gemini-2.0-flash",
282
+ # messages=messages,
283
+ # tools=tools
284
+ # )
285
+ # # --------------------------------
286
+
287
+ # if response.choices[0].finish_reason == "tool_calls":
288
+ # message = response.choices[0].message
289
+ # tool_calls = message.tool_calls
290
+ # results = self.handle_tool_call(tool_calls)
291
+ # messages.append(message)
292
+ # messages.extend(results)
293
+ # else:
294
+ # done = True
295
+
296
+ # return response.choices[0].message.content
297
+
298
+
299
+ # if __name__ == "__main__":
300
+ # me = Me()
301
+ # gr.ChatInterface(me.chat, type="messages").launch()
302
+ # # gr.ChatInterface(me.chat).launch()
303
+
304
+
305
+ # app.py
306
  from dotenv import load_dotenv
307
  from openai import OpenAI
308
  import json
 
310
  import requests
311
  from pypdf import PdfReader
312
  import gradio as gr
313
+ import sqlite3
314
+ import time
315
 
316
  load_dotenv(override=True)
317
 
318
+ # --- CONFIG ---
319
  GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
320
  google_api_key = os.getenv("GOOGLE_API_KEY")
321
 
322
+ # Initialize Gemini client (using OpenAI wrapper you used earlier)
323
+ gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
 
 
 
324
 
325
# --- Pushover helper ---
def push(text):
    """Send *text* as a Pushover notification, best-effort: never raises.

    Skips silently (with a console note) when credentials are absent.
    """
    credentials = {
        "token": os.getenv("PUSHOVER_TOKEN"),
        "user": os.getenv("PUSHOVER_USER"),
    }
    if not credentials["token"] or not credentials["user"]:
        print("Pushover credentials not set. Skipping push.")
        return
    try:
        requests.post(
            "https://api.pushover.net/1/messages.json",
            data={**credentials, "message": text},
            timeout=5
        )
    except Exception as exc:
        # Notification failures must never break the chat flow.
        print("Pushover error:", exc)
340
 
341
# --- Tools (actual implementations) ---
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool: record a visitor's contact details by pushing a notification."""
    notification = f"Recording contact: {name} <{email}> notes: {notes}"
    push(notification)
    return {"recorded": "ok", "email": email, "name": name}
 
345
 
346
def record_unknown_question(question):
    """Tool: log a question the assistant could not answer.

    Pushes a notification and appends the question to a local audit file.
    """
    push(f"Unknown question recorded: {question}")
    # Keep a local audit trail as well.
    os.makedirs("me/logs", exist_ok=True)
    with open("me/logs/unknown_questions.txt", "a", encoding="utf-8") as audit:
        audit.write(question.strip() + "\n")
    return {"recorded": "ok", "question": question}
353
+
354
def search_faq(query):
    """Look up an FAQ answer whose stored question contains *query*.

    Uses a parameterized SQL LIKE (case-insensitive for ASCII in SQLite's
    default configuration). Returns {"answer": <text>} on a hit,
    {"answer": "not found"} on a miss, or {"answer": "FAQ database not
    found."} when me/qa.db does not exist.
    """
    db_path = os.path.join("me", "qa.db")
    if not os.path.exists(db_path):
        return {"answer": "FAQ database not found."}
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.cursor()
        # Parameterized query: *query* is bound, never interpolated into SQL.
        cur.execute("SELECT answer FROM faq WHERE question LIKE ? LIMIT 1", (f"%{query}%",))
        row = cur.fetchone()
    finally:
        # Fix: the original leaked the connection if execute() raised
        # (e.g. the faq table is missing from an old/corrupt db file).
        conn.close()
    return {"answer": row[0]} if row else {"answer": "not found"}
364
+
365
+ # --- Tool JSON metadata (for function-calling style) ---
366
  record_user_details_json = {
367
  "name": "record_user_details",
368
+ "description": "Record an interested user's email and optional name/notes.",
369
  "parameters": {
370
  "type": "object",
371
  "properties": {
372
+ "email": {"type": "string"},
373
+ "name": {"type": "string"},
374
+ "notes": {"type": "string"}
 
 
 
 
 
 
 
 
 
375
  },
376
  "required": ["email"],
377
  "additionalProperties": False
 
380
 
381
  record_unknown_question_json = {
382
  "name": "record_unknown_question",
383
+ "description": "Record any question the assistant could not answer.",
384
  "parameters": {
385
  "type": "object",
386
  "properties": {
387
+ "question": {"type": "string"}
 
 
 
388
  },
389
  "required": ["question"],
390
  "additionalProperties": False
391
  }
392
  }
393
 
394
+ search_faq_json = {
395
+ "name": "search_faq",
396
+ "description": "Search the FAQ database for a question.",
397
+ "parameters": {
398
+ "type": "object",
399
+ "properties": {
400
+ "query": {"type": "string"}
401
+ },
402
+ "required": ["query"],
403
+ "additionalProperties": False
404
+ }
405
+ }
406
+
407
  tools = [
408
  {"type": "function", "function": record_user_details_json},
409
+ {"type": "function", "function": record_unknown_question_json},
410
+ {"type": "function", "function": search_faq_json}
411
  ]
412
 
413
+ # --- The assistant class ---
414
  class Me:
 
415
  def __init__(self):
416
+ self.openai = gemini
417
  self.name = "AKASH M J"
418
 
419
+ # Load profile PDF into self.linkedin
420
  self.linkedin = ""
421
+ try:
422
+ reader = PdfReader(os.path.join("me", "Profile.pdf"))
423
+ for page in reader.pages:
424
+ text = page.extract_text()
425
+ if text:
426
+ self.linkedin += text + "\n"
427
+ except Exception as e:
428
+ print("Could not read Profile.pdf:", e)
429
+
430
+ # Load summary
431
+ try:
432
+ with open(os.path.join("me", "summary.txt"), "r", encoding="utf-8") as f:
433
+ self.summary = f.read()
434
+ except Exception as e:
435
+ print("Could not read summary.txt:", e)
436
+ self.summary = ""
437
+
438
+ # Load knowledge files (RAG-style simple concatenation)
439
+ self.knowledge = ""
440
+ kb_dir = os.path.join("me", "knowledge")
441
+ if os.path.exists(kb_dir):
442
+ for fn in sorted(os.listdir(kb_dir)):
443
+ path = os.path.join(kb_dir, fn)
444
+ try:
445
+ with open(path, "r", encoding="utf-8") as f:
446
+ self.knowledge += f"# {fn}\n" + f.read() + "\n\n"
447
+ except Exception as e:
448
+ print("Error reading", path, e)
449
 
450
+ def system_prompt(self):
451
+ system_prompt = (
452
+ f"You are acting as {self.name}. Answer questions about {self.name}'s background "
453
+ "and experience using the context provided. Be professional and concise. "
454
+ "If you don't know an answer, use the record_unknown_question tool."
455
+ )
456
+ system_prompt += f"\n\n## Summary:\n{self.summary}\n\n"
457
+ system_prompt += f"## LinkedIn profile (extracted):\n{self.linkedin}\n\n"
458
+ system_prompt += f"## Knowledge base:\n{self.knowledge}\n\n"
459
+ return system_prompt
460
 
461
  def handle_tool_call(self, tool_calls):
462
  results = []
463
  for tool_call in tool_calls:
464
  tool_name = tool_call.function.name
465
+ try:
466
+ arguments = json.loads(tool_call.function.arguments)
467
+ except Exception:
468
+ arguments = {}
469
+ print("Tool called:", tool_name, arguments, flush=True)
470
  tool = globals().get(tool_name)
471
  result = tool(**arguments) if tool else {}
472
  results.append({
 
476
  })
477
  return results
478
 
479
+ # Simple router/orchestrator: route common queries to the FAQ or to the LLM
480
+ def route_question(self, question):
481
+ q = question.lower()
482
+ # keywords that map to FAQ
483
+ faq_keywords = ["project", "tech stack", "stack", "skill", "skills", "study", "education", "experience"]
484
+ if any(k in q for k in faq_keywords):
485
+ return "search_faq"
486
+ return None
487
+
488
+ def evaluate_answer(self, user_question, ai_answer):
489
+ # Simple evaluator: ask the LLM to judge the quality
490
+ eval_prompt = f"""
491
+ You are an evaluator. Judge whether the assistant reply is clear, correct, and complete for the user question.
492
+ Return exactly PASS or FAIL and a one-line reason.
493
+
494
+ User question:
495
+ {user_question}
496
+
497
+ Assistant reply:
498
+ {ai_answer}
499
+ """
500
+ try:
501
+ ev = self.openai.chat.completions.create(
502
+ model="gemini-2.0-flash",
503
+ messages=[{"role":"system","content":"You are an evaluator."},
504
+ {"role":"user","content":eval_prompt}]
505
+ )
506
+ text = ev.choices[0].message.content.strip()
507
+ # very simple parse
508
+ if text.upper().startswith("PASS"):
509
+ return {"result":"PASS", "note": text}
510
+ else:
511
+ return {"result":"FAIL", "note": text}
512
+ except Exception as e:
513
+ print("Evaluator failed:", e)
514
+ return {"result":"UNKNOWN", "note": str(e)}
515
 
516
    def chat(self, message, history):
        """Gradio chat entry point: FAQ router -> LLM tool loop -> evaluate/improve.

        *history* is expected to be a list of message dicts compatible with
        the chat-completions API (it is concatenated verbatim into messages).
        """
        # build messages with system prompt + history + user
        messages = [{"role":"system","content":self.system_prompt()}] + history + [{"role":"user","content":message}]

        # 1) Router: check if the question should use the FAQ tool
        tool_to_use = self.route_question(message)
        if tool_to_use == "search_faq":
            # call tool directly and return evaluated answer
            tool_result = search_faq(message)
            raw_answer = tool_result.get("answer", "I don't have that in my FAQ.")
            eval_res = self.evaluate_answer(message, raw_answer)
            if eval_res["result"] == "PASS":
                return raw_answer
            else:
                # fall back to LLM if FAIL
                pass

        # 2) Normal LLM flow with tools support (function-calling style)
        done = False
        while not done:
            response = self.openai.chat.completions.create(
                model="gemini-2.0-flash",
                messages=messages,
                tools=tools
            )

            finish = response.choices[0].finish_reason
            if finish == "tool_calls":
                # the LLM asked to call a tool
                message_obj = response.choices[0].message
                tool_calls = getattr(message_obj, "tool_calls", [])
                results = self.handle_tool_call(tool_calls)
                messages.append(message_obj)
                messages.extend(results)
                # loop again so the LLM can consume tool outputs
            else:
                done = True

        ai_answer = response.choices[0].message.content
        # 3) Evaluate the answer; if FAIL, ask LLM to improve
        eval_res = self.evaluate_answer(message, ai_answer)
        if eval_res["result"] == "FAIL":
            # ask the model to improve using the critique
            improve_prompt = f"User question:\n{message}\n\nAssistant previous reply:\n{ai_answer}\n\nEvaluator note:\n{eval_res['note']}\n\nPlease produce an improved concise answer."
            messages.append({"role":"user","content":improve_prompt})
            improved_resp = self.openai.chat.completions.create(model="gemini-2.0-flash", messages=messages)
            ai_answer = improved_resp.choices[0].message.content

        return ai_answer
565
 
566
# --- Launch ---
if __name__ == "__main__":
    assistant = Me()
    # type="messages" makes Gradio pass history as chat-style message dicts,
    # matching what Me.chat forwards to the completions API.
    gr.ChatInterface(assistant.chat, type="messages").launch()
 
 
init_db.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# init_db.py
"""Create and seed the FAQ SQLite database (me/qa.db) used by app.py's search_faq."""
import os
import sqlite3

# sample seed data (you can edit)
FAQ_DATA = [
    ("What is Akash's tech stack?", "Akash works with the MERN stack: MongoDB, Express, React, Node.js."),
    ("What projects has Akash completed?", "Online Marketplace for Local Artisans, Freelance Collaboration Platform, Hotel Reservation System."),
    ("What is Akash studying?", "MCA at KLN College."),
]


def init_db(db_path=os.path.join("me", "qa.db")):
    """(Re)create the faq table at *db_path* and seed it with FAQ_DATA.

    Existing rows are deleted first, so rerunning never duplicates entries.
    Returns the database path.
    """
    parent = os.path.dirname(db_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.cursor()
        cur.execute("""
        CREATE TABLE IF NOT EXISTS faq (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            question TEXT,
            answer TEXT
        )
        """)
        # Clear existing sample rows so seeding is idempotent.
        cur.execute("DELETE FROM faq")
        cur.executemany("INSERT INTO faq (question, answer) VALUES (?, ?)", FAQ_DATA)
        conn.commit()
    finally:
        # Fix: the original script never closed the connection on error.
        conn.close()
    return db_path


if __name__ == "__main__":
    init_db()
    print("me/qa.db created and seeded.")
me/knowledge/certifications.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ The Complete 2023 Web Development Bootcamp.
2
+ Java Certification: Covered core concepts, OOP principles, and practical applications.
3
+ Full Stack Web Development: focused on front-end and back-end technologies including HTML, CSS, JavaScript, Node.js, and Express.js.
4
+ Software Testing (Basic): Completed at Besant Technologies, covering Manual testing, Automation testing, SDLC, and STLC.
me/knowledge/education.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Master of Computer Applications (MCA)
2
+ Institute: K.L.N. College of Engineering, Madurai.
3
+ Duration: 2023 – 2025.
4
+ Performance: 8.6 CGPA.
5
+
6
+ B.Sc. Computer Science
7
+ Institute: Sourashtra College, Madurai.
8
+ Duration: 2020 – 2023.
9
+ Performance: 8.1 CGPA.
10
+
11
+
12
+ Computer Science - HSC
13
+ Institute: The TVS School
14
+ Duration: 2019-2020
15
+ Performance: 67.66%
16
+
17
+
18
+ SSLC
19
+ Institute : KNG Matriculation School
20
+ Duration : Till 2018
21
+ Performance : 91 %
22
+
me/knowledge/experience.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Analyst (DevOps Analyst) | KPMG India, Bengaluru
2
+ Duration: July 2025 – Present.
3
+ Department: Advisory Consulting - Technology Transformation - Digital Engineering & Quality Team.
4
+ Focus: Currently as a Trainer
5
+
6
+
7
+
8
+ Web Development Intern | Digizura Technologies Private Limited, Bengaluru
9
+ Duration: January 2025 – June 2025.
10
+
11
+ Key Contributions:
12
+ Developed core modules (RFQ, Business Development) using Angular and LoopBack for lifecycle automation.
13
+ Implemented Excel parsing logic to auto-generate BOM tables and margin calculations.
14
+ Developed the frontend for the IT Audit module to streamline workflow reviews.
15
+ Collaborated with backend teams for API integration and data flow.
me/knowledge/projects.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ Online Marketplace for Local Artisans
2
+ Description: A full-stack web application designed for local artisans to display, personalize, and sell handmade products.
3
+ Key Features: Product listing, customization, secure Stripe payment integration, live chat between buyer and seller, and role-based access control (artisan, customer, admin).
4
+ Tech Stack: React.js, Node.js, Express.js, MongoDB, Stripe, Tailwind CSS, Socket.io.
5
+
6
+ Hotel Management System
7
+ Description: A system allowing staff to manage room bookings, customer details, and services.
8
+ Key Features: Integrated MySQL database for efficient data storage and retrieval of booking information.
9
+ Tech Stack: HTML, Tailwind CSS, JavaScript, PHP, MySQL, XAMPP Server.
me/knowledge/skills.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Programming Languages: Java (Core Java), JavaScript.
2
+ Web Development: HTML, CSS, Bootstrap, Node.js, Express.js.
3
+ Databases: MySQL.
4
+ Testing: Automation Testing, Selenium WebDriver (Basic).
5
+ Languages: English, Tamil, Hindi, Sourashtra
me/logs/unknown_questions.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ Does Akash M J hold any patents?
2
+ Do you have any knowledge on cricket?
me/qa.db ADDED
Binary file (12.3 kB). View file