tayyab-077 committed on
Commit
a36268e
Β·
1 Parent(s): 3c5bfb7

update

Browse files
Files changed (2) hide show
  1. app.py +41 -94
  2. src/chatbot.py +39 -57
app.py CHANGED
@@ -1,19 +1,21 @@
1
- # app.py β€” from local backup
2
 
3
  import gradio as gr
4
  import os
5
  import tempfile
6
  import textwrap
7
  from datetime import datetime
8
- from pathlib import Path
9
  from typing import List, Dict, Any, Optional
 
 
10
 
11
  from src.model_loader import load_local_model
12
  from src.conversation import ConversationMemory
13
  from src.chatbot import LocalChatbot
14
 
15
-
16
-
 
17
  llm = load_local_model()
18
  memory = ConversationMemory()
19
  bot = LocalChatbot(llm, memory)
@@ -30,34 +32,20 @@ def now_ts():
30
 
31
 
32
  # ----------------------
33
- # EXPORT TXT/PDF
34
  # ----------------------
35
- import os
36
- import tempfile
37
- import textwrap
38
- from datetime import datetime
39
- from typing import List, Dict, Optional
40
- from reportlab.lib.pagesizes import A4
41
- from reportlab.pdfgen import canvas
42
-
43
- def export_chat_files(history: List[Dict[str, any]]) -> Dict[str, Optional[str]]:
44
  tmpdir = tempfile.gettempdir()
45
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
46
  txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
47
  pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")
48
 
49
-
50
  def remove_last_closing_line(lines):
51
  closing_keywords = [
52
- "let me know", "is there anything else",
53
- "anything else i can help", "feel free to ask",
54
- "hope this helps", "need further assistance",
55
- "feel free", "happy to help",
56
- "hello! how can i assist you today?",
57
- "are there any specific industries or areas you'd like to explore in more detail?",
58
- "how can i help you better?", "what did you like about our interaction?",
59
- "do you have any feedback on your experience?", "would you like to explore",
60
- "need clarification"
61
  ]
62
  if not lines:
63
  return lines
@@ -66,22 +54,18 @@ def export_chat_files(history: List[Dict[str, any]]) -> Dict[str, Optional[str]]
66
  return lines[:-1]
67
  return lines
68
 
69
- # ---------------- TXT FILE ----------------
70
  with open(txt_path, "w", encoding="utf-8") as f:
71
  for msg in history:
72
- content_data = msg.get("content", "")
73
- if isinstance(content_data, dict):
74
- content = content_data.get("text", "")
75
- else:
76
- content = str(content_data)
77
-
78
- lines = content.splitlines()
79
- clean = [l.strip() for l in lines if l.strip() and not l.strip().startswith("πŸ•’")]
80
- clean = remove_last_closing_line(clean)
81
- f.write("\n".join(clean) + "\n\n")
82
  f.write("-" * 60 + "\n\n")
83
 
84
- # ---------------- PDF FILE ----------------
85
  try:
86
  c = canvas.Canvas(pdf_path, pagesize=A4)
87
  page_width, page_height = A4
@@ -89,41 +73,37 @@ def export_chat_files(history: List[Dict[str, any]]) -> Dict[str, Optional[str]]
89
  y = page_height - margin
90
  line_height = 14
91
  font_size = 10
92
-
93
  c.setFont("Helvetica", font_size)
94
 
95
  for msg in history:
96
  role = msg.get("role", "user").capitalize()
97
- content_data = msg.get("content", "")
98
- if isinstance(content_data, dict):
99
- content = content_data.get("text", "")
100
- else:
101
- content = str(content_data)
102
 
103
- lines = content.splitlines()
104
- clean_lines = [l.strip() for l in lines if l.strip()]
105
- clean_lines = remove_last_closing_line(clean_lines)
106
 
107
- for line in clean_lines:
108
  wrapped = textwrap.wrap(line, width=95)
109
  for wline in wrapped:
110
  if y < margin + line_height:
111
  c.showPage()
112
  c.setFont("Helvetica", font_size)
113
  y = page_height - margin
114
- c.drawString(margin, y, f"{role}: {wline}" if role=="User" else wline)
115
  y -= line_height
116
-
117
- y -= line_height # spacing between messages
118
 
119
  c.showPage()
120
  c.save()
121
  except Exception as e:
122
  print("PDF export failed:", e)
123
  pdf_path = None
124
-
125
  return {"txt": txt_path, "pdf": pdf_path}
126
 
 
127
  # ----------------------
128
  # Core chat function
129
  # ----------------------
@@ -134,6 +114,7 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
134
  if not user_msg.strip():
135
  return history
136
 
 
137
  intent = None
138
  low = user_msg.lower()
139
  for key in INTENT_TEMPLATES:
@@ -142,19 +123,13 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
142
  user_msg = user_msg[len(key):].strip()
143
  break
144
 
145
-
146
- system_prefix = INTENT_TEMPLATES.get(intent, None)
147
- if system_prefix:
148
- prompt = f"{system_prefix}\nUser: {user_msg}"
149
- else:
150
- prompt = f"User: {user_msg}"
151
-
152
- bot_reply = bot.ask(prompt)
153
  ts = now_ts()
154
  bot_reply_ts = f"{bot_reply}\n\nπŸ•’ {ts}"
155
 
156
- history.append({"role": "user", "content": user_msg})
157
- history.append({"role": "assistant", "content": bot_reply_ts})
158
 
159
  try:
160
  memory.add(user_msg, bot_reply)
@@ -165,7 +140,7 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
165
 
166
 
167
  # ----------------------
168
- # CUSTOM CSS
169
  # ----------------------
170
  CUSTOM_CSS = """
171
  /* GLOBAL */
@@ -205,7 +180,6 @@ CUSTOM_CSS = """
205
  padding: 12px;
206
  }
207
 
208
-
209
  /* Input box */
210
  #message-box textarea {
211
  background: #e0e7ff !important;
@@ -219,9 +193,8 @@ CUSTOM_CSS = """
219
  color: white !important;
220
  transition: background 0.2s ease, transform 0.2s ease;
221
  }
222
-
223
  .send-btn:hover {
224
- background: #4338ca !important; /* darker indigo */
225
  transform: scale(1.05);
226
  }
227
 
@@ -230,18 +203,12 @@ CUSTOM_CSS = """
230
  background: #f1f5f9 !important;
231
  transition: background 0.2s ease, transform 0.2s ease;
232
  }
233
-
234
  .icon-btn:hover {
235
- background: #e2e8f0 !important; /* slightly darker */
236
  transform: scale(1.05);
237
  }
238
-
239
  """
240
 
241
- #----------------------
242
- #JS (Voice only)
243
- #----------------------
244
-
245
  PAGE_JS = """
246
  <script>
247
  (function(){
@@ -260,8 +227,6 @@ PAGE_JS = """
260
 
261
  recog.onresult = function(e){
262
  textarea.value = e.results[0][0].transcript;
263
-
264
- // Visual flash to confirm input
265
  textarea.style.background = "#e7f5ff";
266
  setTimeout(() => { textarea.style.background = ""; }, 400);
267
  };
@@ -272,33 +237,20 @@ PAGE_JS = """
272
  </script>
273
  """
274
 
275
- # ----------------------
276
- # UI
277
- # ----------------------
278
  with gr.Blocks(title="Tayyab β€” Chatbot") as demo:
279
- gr.HTML(f"""
280
- <style>{CUSTOM_CSS}</style>
281
- <script>{PAGE_JS}</script>
282
- """)
283
 
284
  with gr.Row():
285
  with gr.Column(scale=1, min_width=220):
286
  gr.Markdown("### ⚑ Tools & Export")
287
-
288
  new_chat_btn = gr.Button("βž• New Chat")
289
  export_btn = gr.Button("πŸ“₯ Export TXT/PDF")
290
 
291
  with gr.Column(scale=3, elem_id="main_card"):
292
  gr.Markdown("<h3>Smart Learning Assistant - Tayyab</h3>")
293
  chatbot = gr.Chatbot(height=480, elem_id="chatbot_box")
294
-
295
  with gr.Row():
296
- msg = gr.Textbox(
297
- placeholder="Type a message or use the mic",
298
- elem_id="message-box",
299
- show_label=False,
300
- lines=3,
301
- )
302
  send_btn = gr.Button("Send", elem_classes="send-btn")
303
  mic_btn = gr.Button("🎀 Voice input", elem_classes="icon-btn")
304
  mic_btn.click(None, None, None, js='startVoiceRecognition("message-box")')
@@ -315,19 +267,14 @@ with gr.Blocks(title="Tayyab β€” Chatbot") as demo:
315
 
316
  new_chat_btn.click(new_chat, outputs=[chatbot])
317
 
318
-
319
  def export_handler(history):
320
- # history is already list of dicts: [{"role": "...", "content": "..."}]
321
  files = export_chat_files(history or [])
322
-
323
  return (
324
  gr.update(value=files.get("txt"), visible=True),
325
  gr.update(value=files.get("pdf"), visible=bool(files.get("pdf")))
326
  )
327
 
328
-
329
  export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
330
-
331
-
332
  if __name__ == "__main__":
333
  demo.launch()
 
1
+ # app.py
2
 
3
  import gradio as gr
4
  import os
5
  import tempfile
6
  import textwrap
7
  from datetime import datetime
 
8
  from typing import List, Dict, Any, Optional
9
+ from reportlab.lib.pagesizes import A4
10
+ from reportlab.pdfgen import canvas
11
 
12
  from src.model_loader import load_local_model
13
  from src.conversation import ConversationMemory
14
  from src.chatbot import LocalChatbot
15
 
16
+ # -----------------------
17
+ # Initialize
18
+ # -----------------------
19
  llm = load_local_model()
20
  memory = ConversationMemory()
21
  bot = LocalChatbot(llm, memory)
 
32
 
33
 
34
  # ----------------------
35
+ # Export TXT/PDF
36
  # ----------------------
37
+ def export_chat_files(history: List[Dict[str, Any]]) -> Dict[str, Optional[str]]:
 
 
 
 
 
 
 
 
38
  tmpdir = tempfile.gettempdir()
39
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
40
  txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
41
  pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")
42
 
 
43
  def remove_last_closing_line(lines):
44
  closing_keywords = [
45
+ "let me know", "is there anything else", "anything else i can help",
46
+ "feel free to ask", "hope this helps", "need further assistance",
47
+ "feel free", "happy to help", "hello! how can i assist you today?",
48
+ "how can i help you better?", "would you like to explore", "need clarification"
 
 
 
 
 
49
  ]
50
  if not lines:
51
  return lines
 
54
  return lines[:-1]
55
  return lines
56
 
57
+ # TXT
58
  with open(txt_path, "w", encoding="utf-8") as f:
59
  for msg in history:
60
+ content = msg.get("content", "")
61
+ if isinstance(content, dict):
62
+ content = content.get("text", "")
63
+ lines = [l.strip() for l in str(content).splitlines() if l.strip() and not l.strip().startswith("πŸ•’")]
64
+ lines = remove_last_closing_line(lines)
65
+ f.write("\n".join(lines) + "\n\n")
 
 
 
 
66
  f.write("-" * 60 + "\n\n")
67
 
68
+ # PDF
69
  try:
70
  c = canvas.Canvas(pdf_path, pagesize=A4)
71
  page_width, page_height = A4
 
73
  y = page_height - margin
74
  line_height = 14
75
  font_size = 10
 
76
  c.setFont("Helvetica", font_size)
77
 
78
  for msg in history:
79
  role = msg.get("role", "user").capitalize()
80
+ content = msg.get("content", "")
81
+ if isinstance(content, dict):
82
+ content = content.get("text", "")
 
 
83
 
84
+ lines = [l.strip() for l in str(content).splitlines() if l.strip() and not l.strip().startswith("πŸ•’")]
85
+ lines = remove_last_closing_line(lines)
 
86
 
87
+ for line in lines:
88
  wrapped = textwrap.wrap(line, width=95)
89
  for wline in wrapped:
90
  if y < margin + line_height:
91
  c.showPage()
92
  c.setFont("Helvetica", font_size)
93
  y = page_height - margin
94
+ c.drawString(margin, y, f"{role}: {wline}")
95
  y -= line_height
96
+ y -= line_height
 
97
 
98
  c.showPage()
99
  c.save()
100
  except Exception as e:
101
  print("PDF export failed:", e)
102
  pdf_path = None
103
+
104
  return {"txt": txt_path, "pdf": pdf_path}
105
 
106
+
107
  # ----------------------
108
  # Core chat function
109
  # ----------------------
 
114
  if not user_msg.strip():
115
  return history
116
 
117
+ # Detect intent prefix from templates
118
  intent = None
119
  low = user_msg.lower()
120
  for key in INTENT_TEMPLATES:
 
123
  user_msg = user_msg[len(key):].strip()
124
  break
125
 
126
+ # Ask chatbot (pass intent)
127
+ bot_reply = bot.ask(user_msg, intent=intent)
 
 
 
 
 
 
128
  ts = now_ts()
129
  bot_reply_ts = f"{bot_reply}\n\nπŸ•’ {ts}"
130
 
131
+ history.append({"role": "user", "content": str(user_msg)})
132
+ history.append({"role": "assistant", "content": str(bot_reply_ts)})
133
 
134
  try:
135
  memory.add(user_msg, bot_reply)
 
140
 
141
 
142
  # ----------------------
143
+ # UI / Gradio
144
  # ----------------------
145
  CUSTOM_CSS = """
146
  /* GLOBAL */
 
180
  padding: 12px;
181
  }
182
 
 
183
  /* Input box */
184
  #message-box textarea {
185
  background: #e0e7ff !important;
 
193
  color: white !important;
194
  transition: background 0.2s ease, transform 0.2s ease;
195
  }
 
196
  .send-btn:hover {
197
+ background: #4338ca !important;
198
  transform: scale(1.05);
199
  }
200
 
 
203
  background: #f1f5f9 !important;
204
  transition: background 0.2s ease, transform 0.2s ease;
205
  }
 
206
  .icon-btn:hover {
207
+ background: #e2e8f0 !important;
208
  transform: scale(1.05);
209
  }
 
210
  """
211
 
 
 
 
 
212
  PAGE_JS = """
213
  <script>
214
  (function(){
 
227
 
228
  recog.onresult = function(e){
229
  textarea.value = e.results[0][0].transcript;
 
 
230
  textarea.style.background = "#e7f5ff";
231
  setTimeout(() => { textarea.style.background = ""; }, 400);
232
  };
 
237
  </script>
238
  """
239
 
 
 
 
240
  with gr.Blocks(title="Tayyab β€” Chatbot") as demo:
241
+ gr.HTML(f"<style>{CUSTOM_CSS}</style><script>{PAGE_JS}</script>")
 
 
 
242
 
243
  with gr.Row():
244
  with gr.Column(scale=1, min_width=220):
245
  gr.Markdown("### ⚑ Tools & Export")
 
246
  new_chat_btn = gr.Button("βž• New Chat")
247
  export_btn = gr.Button("πŸ“₯ Export TXT/PDF")
248
 
249
  with gr.Column(scale=3, elem_id="main_card"):
250
  gr.Markdown("<h3>Smart Learning Assistant - Tayyab</h3>")
251
  chatbot = gr.Chatbot(height=480, elem_id="chatbot_box")
 
252
  with gr.Row():
253
+ msg = gr.Textbox(placeholder="Type a message or use the mic", elem_id="message-box", show_label=False, lines=3)
 
 
 
 
 
254
  send_btn = gr.Button("Send", elem_classes="send-btn")
255
  mic_btn = gr.Button("🎀 Voice input", elem_classes="icon-btn")
256
  mic_btn.click(None, None, None, js='startVoiceRecognition("message-box")')
 
267
 
268
  new_chat_btn.click(new_chat, outputs=[chatbot])
269
 
 
270
  def export_handler(history):
 
271
  files = export_chat_files(history or [])
 
272
  return (
273
  gr.update(value=files.get("txt"), visible=True),
274
  gr.update(value=files.get("pdf"), visible=bool(files.get("pdf")))
275
  )
276
 
 
277
  export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
278
+
 
279
  if __name__ == "__main__":
280
  demo.launch()
src/chatbot.py CHANGED
@@ -1,35 +1,33 @@
1
  # src/chatbot.py
2
 
3
  from typing import Dict, Any, Optional
4
- from src.intent import detect_intent
5
  from src.templates import TEMPLATES
 
6
 
7
- # -------------------------------
8
- # DEFAULT GENERATION ARGUMENTS
9
- # -------------------------------
10
 
11
  DEFAULT_GEN_ARGS = {
12
- "max_tokens": 350,
13
- "temperature": 0.6,
14
- "top_p": 0.9
15
  }
16
 
17
-
18
- MSG_SEPARATOR = "\n"
19
-
20
-
21
  class LocalChatbot:
22
  def __init__(self, llm, memory, default_template: Optional[str] = "general"):
23
  self.llm = llm
24
  self.memory = memory
25
  self.default_template = default_template
26
 
 
 
 
27
  def _build_system_prompt(self, intent: str) -> str:
28
- # get template for intent
29
  return TEMPLATES.get(intent, TEMPLATES.get(self.default_template, TEMPLATES["general"]))
30
 
 
 
 
31
  def _build_prompt(self, user_message: str, intent: str, max_pairs: int = 12) -> str:
32
- # Trim memory to recent pairs before building prompt
33
  try:
34
  self.memory.trim_to_recent_pairs(max_pairs)
35
  except Exception:
@@ -37,44 +35,52 @@ class LocalChatbot:
37
 
38
  system_prompt = self._build_system_prompt(intent)
39
  history_text = self.memory.get_formatted(separator=MSG_SEPARATOR)
40
-
41
- parts = [
42
- f"System: {system_prompt}",
43
- history_text,
44
- f"User: {user_message}",
45
- "Assistant:"
46
- ]
47
- # join non-empty parts
48
- return MSG_SEPARATOR.join([p for p in parts if p is not None and p != ""])
49
-
50
 
51
- def ask(self, user_message: str, gen_args: Optional[Dict[str, Any]] = None) -> str:
52
- if not user_message or not user_message.strip():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  return "Please enter a message."
54
 
55
- # Detect intent
56
- intent = detect_intent(user_message)
 
57
 
58
  # Build prompt
59
  prompt = self._build_prompt(user_message, intent, max_pairs=12)
60
 
61
-
62
- # Merge generation args
63
  gen = DEFAULT_GEN_ARGS.copy()
64
  if gen_args:
65
  gen.update(gen_args)
66
 
67
- # Attempt to call the LLM (defensive: handle different API variants)
68
  try:
69
  output = self.llm(prompt, **gen)
70
  except TypeError:
71
- # fallback mapping: map max_tokens -> max_new_tokens
72
  alt_gen = gen.copy()
73
  if "max_tokens" in alt_gen:
74
  alt_gen["max_new_tokens"] = alt_gen.pop("max_tokens")
75
  output = self.llm(prompt, **alt_gen)
76
 
77
- # Parse the output robustly
78
  bot_reply = ""
79
  try:
80
  if isinstance(output, dict) and "choices" in output:
@@ -89,7 +95,7 @@ class LocalChatbot:
89
  if not bot_reply:
90
  bot_reply = "Sorry β€” I couldn't generate a response. Please try again."
91
 
92
- # Add to memory
93
  try:
94
  self.memory.add(user_message, bot_reply)
95
  except Exception:
@@ -100,27 +106,3 @@ class LocalChatbot:
100
  pass
101
 
102
  return bot_reply
103
-
104
-
105
-
106
-
107
- # # Create the generation args HERE
108
- # gen = DEFAULT_GEN_ARGS.copy()
109
- # if gen_args:
110
- # gen.update(gen_args)
111
-
112
- # response = self.llm(
113
- # prompt,
114
- # max_tokens=gen["max_tokens"],
115
- # temperature=gen["temperature"],
116
- # top_p=gen["top_p"],
117
-
118
- # stop=["</system>", "\nUser:", "\nUser says:", "\nSystem:", "\nAssistant:", "\nYou:"]
119
-
120
-
121
- # )
122
-
123
- # reply = response["choices"][0]["text"].strip()
124
-
125
- # self.memory.add(user_message, reply)
126
- # return reply
 
1
  # src/chatbot.py
2
 
3
  from typing import Dict, Any, Optional
 
4
  from src.templates import TEMPLATES
5
+ from src.intent import detect_intent
6
 
7
+ MSG_SEPARATOR = "\n"
 
 
8
 
9
  DEFAULT_GEN_ARGS = {
10
+ "max_tokens": 350,
11
+ "temperature": 0.6,
12
+ "top_p": 0.9
13
  }
14
 
 
 
 
 
15
  class LocalChatbot:
16
  def __init__(self, llm, memory, default_template: Optional[str] = "general"):
17
  self.llm = llm
18
  self.memory = memory
19
  self.default_template = default_template
20
 
21
+ # ------------------------------------------------
22
+ # Build system prompt based on intent
23
+ # ------------------------------------------------
24
  def _build_system_prompt(self, intent: str) -> str:
 
25
  return TEMPLATES.get(intent, TEMPLATES.get(self.default_template, TEMPLATES["general"]))
26
 
27
+ # ------------------------------------------------
28
+ # Build full prompt with memory and user message
29
+ # ------------------------------------------------
30
  def _build_prompt(self, user_message: str, intent: str, max_pairs: int = 12) -> str:
 
31
  try:
32
  self.memory.trim_to_recent_pairs(max_pairs)
33
  except Exception:
 
35
 
36
  system_prompt = self._build_system_prompt(intent)
37
  history_text = self.memory.get_formatted(separator=MSG_SEPARATOR)
 
 
 
 
 
 
 
 
 
 
38
 
39
+ parts = [
40
+ f"System: {system_prompt}",
41
+ history_text,
42
+ f"User: {user_message}",
43
+ "Assistant:"
44
+ ]
45
+
46
+ return MSG_SEPARATOR.join([p for p in parts if p])
47
+
48
+ # ------------------------------------------------
49
+ # Main ask function
50
+ # ------------------------------------------------
51
+ def ask(self, user_message: Any, gen_args: Optional[Dict[str, Any]] = None, intent: Optional[str] = None) -> str:
52
+ # Extract text if passed from Gradio
53
+ if isinstance(user_message, list):
54
+ user_message = "\n".join([item.get("text", "") if isinstance(item, dict) else str(item) for item in user_message])
55
+ elif isinstance(user_message, dict) and "text" in user_message:
56
+ user_message = user_message["text"]
57
+
58
+ user_message = str(user_message).strip()
59
+ if not user_message:
60
  return "Please enter a message."
61
 
62
+ # Use passed intent or detect
63
+ if intent is None:
64
+ intent = detect_intent(user_message)
65
 
66
  # Build prompt
67
  prompt = self._build_prompt(user_message, intent, max_pairs=12)
68
 
69
+ # Merge generation args
 
70
  gen = DEFAULT_GEN_ARGS.copy()
71
  if gen_args:
72
  gen.update(gen_args)
73
 
74
+ # Call LLM
75
  try:
76
  output = self.llm(prompt, **gen)
77
  except TypeError:
 
78
  alt_gen = gen.copy()
79
  if "max_tokens" in alt_gen:
80
  alt_gen["max_new_tokens"] = alt_gen.pop("max_tokens")
81
  output = self.llm(prompt, **alt_gen)
82
 
83
+ # Parse output
84
  bot_reply = ""
85
  try:
86
  if isinstance(output, dict) and "choices" in output:
 
95
  if not bot_reply:
96
  bot_reply = "Sorry β€” I couldn't generate a response. Please try again."
97
 
98
+ # Store memory
99
  try:
100
  self.memory.add(user_message, bot_reply)
101
  except Exception:
 
106
  pass
107
 
108
  return bot_reply