DevNumb committed
Commit edc7f70 · verified · 1 Parent(s): 47b0d3a

Update app.py

Files changed (1):
  1. app.py +36 -16
app.py CHANGED
@@ -28,22 +28,24 @@ tokenizer, model = load_model()
 
 def remove_think_tags(text):
     """
-    Remove <think>...</think> tags from text - METHOD 1
+    Remove <think>...</think> tags from text
     """
     cleaned_text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
     return cleaned_text.strip()
 
 def generate_response(message, history, temperature=0.7, max_length=256):
     """
-    Generate a response using Qwen3-0.6B with your specified method
+    Generate a response using Qwen3-0.6B
     """
     if tokenizer is None or model is None:
         return "⚠️ Model is not loaded properly. Please check the console logs."
 
     try:
-        # Convert history to messages format
+        # Convert history to messages format - LIMIT HISTORY to last 6 exchanges
         messages = []
-        for human_msg, assistant_msg in history:
+        recent_history = history[-6:] if history else []  # Keep only last 6 exchanges
+
+        for human_msg, assistant_msg in recent_history:
             messages.extend([
                 {"role": "user", "content": human_msg},
                 {"role": "assistant", "content": assistant_msg}
@@ -52,7 +54,7 @@ def generate_response(message, history, temperature=0.7, max_length=256):
         # Add current message
         messages.append({"role": "user", "content": message})
 
-        # Apply chat template exactly as in your example
+        # Apply chat template
         inputs = tokenizer.apply_chat_template(
             messages,
             add_generation_prompt=True,
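The hunk cuts off inside the `apply_chat_template` call, so here is a hedged, standalone sketch of the usual template → generate → decode flow for this model; the model id, sampling arguments, and slicing of the prompt tokens are assumptions about typical usage, not code taken from app.py:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumed checkpoint; app.py loads its tokenizer/model via load_model() instead
model_id = "Qwen/Qwen3-0.6B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto")

messages = [{"role": "user", "content": "Hello!"}]

# Render the conversation with the chat template, ending on an open assistant turn
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
)

with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_new_tokens=256,   # roughly the app's max_length default
        temperature=0.7,      # roughly the app's temperature default
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

# Decode only the newly generated tokens; the app then strips <think> blocks from this
reply = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(reply)
```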
@@ -91,22 +93,22 @@ def generate_response(message, history, temperature=0.7, max_length=256):
 
 def chat_interface(message, history, temperature, max_length):
     """
-    Main chat interface function
+    Main chat interface function - FIXED to prevent infinite loops
     """
     if not message or not message.strip():
         return "", history or []
 
-    # Generate response
+    # Generate response with limited history context
     bot_response = generate_response(message, history or [], temperature, max_length)
 
-    # Update history
+    # Update history - this will naturally grow but we limit context in generation
     new_history = (history or []) + [[message, bot_response]]
 
     return "", new_history
 
 def clear_chat():
     """
-    Clear the chat history
+    Clear the chat history - COMPLETELY RESET
     """
     return []
 
@@ -129,7 +131,7 @@ def retry_last_response(history, temperature, max_length):
 
     return new_history
 
-# Enhanced CSS for beautiful UI with settings below chat
+# Enhanced CSS for beautiful UI
 custom_css = """
 .gradio-container {
     font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
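The body of `retry_last_response` sits outside the hunks shown here; purely as orientation, this is a plausible minimal implementation consistent with the visible `return new_history` tail and the [user, assistant] pair format used elsewhere (a guess, not the committed code):

```python
def retry_last_response(history, temperature, max_length):
    # Hypothetical sketch: re-generate the most recent assistant turn
    if not history:
        return []
    last_user_msg = history[-1][0]      # history stores [user, assistant] pairs
    trimmed = history[:-1]              # drop the turn being retried
    new_reply = generate_response(last_user_msg, trimmed, temperature, max_length)
    new_history = trimmed + [[last_user_msg, new_reply]]
    return new_history
```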
@@ -219,6 +221,7 @@ custom_css = """
     padding: 30px !important;
     margin: 0 !important;
     border-radius: 0 !important;
+    overflow-y: auto !important;
 }
 
 #chatbot .message {
@@ -447,6 +450,16 @@ custom_css = """
     background: #ed8936;
     box-shadow: 0 0 10px #ed8936;
 }
+
+.chat-history-info {
+    text-align: center;
+    color: #718096;
+    font-size: 14px;
+    padding: 10px;
+    background: #f7fafc;
+    border-radius: 10px;
+    margin: 10px 0;
+}
 """
 
 # Create the Gradio interface
@@ -464,7 +477,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
     # Chatbot
     with gr.Column(elem_classes="chatbot-wrapper"):
         chatbot = gr.Chatbot(
-            value=[["Hello! How can I assist you today? 😊", ""]],
+            value=[],  # Start with empty chat
             label="",
             elem_id="chatbot",
             show_copy_button=True,
@@ -493,7 +506,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
         clear_btn = gr.Button("🗑️ Clear Chat", elem_classes="clear-btn")
         retry_btn = gr.Button("🔄 Retry Last", elem_classes="retry-btn")
         gr.HTML("""<div style="flex: 1; text-align: center; color: #666; font-size: 14px; padding: 12px;">
-            ✨ Powered by Qwen3-0.6B • Think tags automatically removed
+            ✨ Powered by Qwen3-0.6B • Context limited to 6 recent messages
         </div>""")
 
     # Controls Section (Below Chat)
@@ -539,6 +552,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
 
         **Model:** Qwen3-0.6B ✅
         **Think Tags:** Auto-Removed ✅
+        **Context:** Last 6 messages
 
         <div class="model-features">
         **Key Features:**
@@ -581,25 +595,31 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
     )
 
     # Event handlers
+    def handle_submit(message, history, temp, max_len):
+        """Wrapper function to handle chat submission"""
+        if not message or not message.strip():
+            return "", history or []
+        return chat_interface(message, history, temp, max_len)
+
     submit_event = msg.submit(
-        chat_interface,
+        fn=handle_submit,
         inputs=[msg, chatbot, temperature, max_length],
         outputs=[msg, chatbot]
     )
 
     submit_btn.click(
-        chat_interface,
+        fn=handle_submit,
         inputs=[msg, chatbot, temperature, max_length],
         outputs=[msg, chatbot]
    )
 
     clear_btn.click(
-        clear_chat,
+        fn=clear_chat,
         outputs=[chatbot]
     )
 
     retry_btn.click(
-        retry_last_response,
+        fn=retry_last_response,
         inputs=[chatbot, temperature, max_length],
         outputs=[chatbot]
     )
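To see the `fn=` / `inputs=` / `outputs=` wiring in isolation, here is a minimal runnable Blocks app with a stub echo function standing in for the real `chat_interface` (component names mirror the diff; the stub body and slider ranges are made up):

```python
import gradio as gr

def handle_submit(message, history, temp, max_len):
    """Echo stub standing in for chat_interface, just to show the event wiring."""
    if not message or not message.strip():
        return "", history or []
    reply = f"(temp={temp}, max_len={max_len}) you said: {message}"
    return "", (history or []) + [[message, reply]]

def clear_chat():
    return []

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(value=[])
    msg = gr.Textbox(placeholder="Type a message...")
    temperature = gr.Slider(0.1, 1.5, value=0.7, label="Temperature")
    max_length = gr.Slider(32, 1024, value=256, step=32, label="Max length")
    submit_btn = gr.Button("Send")
    clear_btn = gr.Button("Clear Chat")

    # Same keyword-style wiring as the diff: fn, inputs, outputs
    msg.submit(fn=handle_submit,
               inputs=[msg, chatbot, temperature, max_length],
               outputs=[msg, chatbot])
    submit_btn.click(fn=handle_submit,
                     inputs=[msg, chatbot, temperature, max_length],
                     outputs=[msg, chatbot])
    clear_btn.click(fn=clear_chat, outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()
```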
 