GhostDragon01 commited on
Commit
b58981e
·
1 Parent(s): 9685fdc

refactor: Reduce verbosity in agent outputs and enhance message formatting in chat interface

Browse files
src/agents/bookmarks_agent.py CHANGED
@@ -345,4 +345,7 @@ bookmarks_agent = CodeAgent(
345
  description="Specialized agent for Chrome bookmarks operations, focusing on AI ressources folder. Extracts bookmarks from Chrome and caches them in data/ai_bookmarks_cache.json to avoid direct interaction with Chrome's raw JSON. Provides search, filtering, statistics, and cache management for AI-related bookmarks.",
346
  max_steps=10,
347
  additional_authorized_imports=["json", "datetime", "urllib.parse", "pathlib"],
 
 
 
348
  )
 
345
  description="Specialized agent for Chrome bookmarks operations, focusing on AI ressources folder. Extracts bookmarks from Chrome and caches them in data/ai_bookmarks_cache.json to avoid direct interaction with Chrome's raw JSON. Provides search, filtering, statistics, and cache management for AI-related bookmarks.",
346
  max_steps=10,
347
  additional_authorized_imports=["json", "datetime", "urllib.parse", "pathlib"],
348
+ # Reduce verbosity
349
+ stream_outputs=False,
350
+ max_print_outputs_length=300,
351
  )
src/agents/categoriser_agent.py CHANGED
@@ -540,4 +540,7 @@ categoriser_agent = CodeAgent(
540
  description="Specializes in categorizing AI news and bookmarks into 10 predefined categories: Research & Breakthroughs, Model Releases & Updates, Tools/Frameworks/Platforms, Applications & Industry Use Cases, Regulation/Ethics/Policy, Investment/Funding/M&A, Benchmarks & Leaderboards, Community/Events/Education, Security/Privacy/Safety, and Market Trends & Analysis. Uses keyword-based categorization and provides tools for managing and searching categorized content.",
541
  max_steps=10,
542
  additional_authorized_imports=["json", "datetime", "re", "pathlib"],
 
 
 
543
  )
 
540
  description="Specializes in categorizing AI news and bookmarks into 10 predefined categories: Research & Breakthroughs, Model Releases & Updates, Tools/Frameworks/Platforms, Applications & Industry Use Cases, Regulation/Ethics/Policy, Investment/Funding/M&A, Benchmarks & Leaderboards, Community/Events/Education, Security/Privacy/Safety, and Market Trends & Analysis. Uses keyword-based categorization and provides tools for managing and searching categorized content.",
541
  max_steps=10,
542
  additional_authorized_imports=["json", "datetime", "re", "pathlib"],
543
+ # Reduce verbosity
544
+ stream_outputs=False,
545
+ max_print_outputs_length=300,
546
  )
src/agents/gmail_agent.py CHANGED
@@ -84,4 +84,6 @@ gmail_agent = CodeAgent(
84
  description="Gmail agent specialized in reading and searching emails from habib.adoum01@gmail.com and news@alphasignal.ai only",
85
  max_steps=10,
86
  additional_authorized_imports=["json"],
 
 
87
  )
 
84
  description="Gmail agent specialized in reading and searching emails from habib.adoum01@gmail.com and news@alphasignal.ai only",
85
  max_steps=10,
86
  additional_authorized_imports=["json"],
87
+ stream_outputs=False,
88
+ max_print_outputs_length=300,
89
  )
src/agents/manager_agent.py CHANGED
@@ -47,4 +47,7 @@ manager_agent = CodeAgent(
47
  additional_authorized_imports=["json"],
48
  # Add planning to help with complex queries
49
  planning_interval=3, # Plan every 3 steps to maintain focus
 
 
 
50
  )
 
47
  additional_authorized_imports=["json"],
48
  # Add planning to help with complex queries
49
  planning_interval=3, # Plan every 3 steps to maintain focus
50
+ # Reduce verbosity - disable streaming outputs and minimize console display
51
+ stream_outputs=False, # Disable live streaming of thoughts to terminal
52
+ max_print_outputs_length=500, # Limit output length to reduce terminal noise
53
  )
src/interfaces/gradio_interface.py CHANGED
@@ -307,15 +307,31 @@ Thanks to **Modal Labs**, **Hugging Face**, **Nebius**, **Anthropic**, **OpenAI*
307
  return about_tab
308
 
309
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
  def validate_message_history(history):
311
  """Validate and return properly formatted message history"""
312
  validated = []
313
  for msg in history:
314
  if isinstance(msg, dict) and "role" in msg and "content" in msg:
315
- # Ensure content is a string
316
- if not isinstance(msg["content"], str):
317
- msg["content"] = str(msg["content"])
318
- validated.append(msg)
 
 
319
  else:
320
  print(f"Warning: Invalid message format detected: {msg}")
321
  return validated
@@ -336,20 +352,23 @@ def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
336
  if isinstance(item, dict):
337
  # Already a dict, check if it has required keys
338
  if "role" in item and "content" in item:
339
- formatted_history.append(item)
 
 
340
  else:
341
  # Skip malformed dict items
342
  print(f"Warning: Skipping malformed history item: {item}")
343
  continue
344
  elif hasattr(item, "role") and hasattr(item, "content"):
345
- # ChatMessage object - convert to dict
346
- formatted_history.append({"role": item.role, "content": item.content})
 
347
  elif isinstance(item, (list, tuple)) and len(item) == 2:
348
  # Legacy format: [user_message, assistant_message] or (user, assistant)
349
  # Convert to proper message format
350
  if isinstance(item[0], str) and isinstance(item[1], str):
351
- formatted_history.append({"role": "user", "content": item[0]})
352
- formatted_history.append({"role": "assistant", "content": item[1]})
353
  else:
354
  print(f"Warning: Skipping malformed history item: {item}")
355
  continue
@@ -391,10 +410,10 @@ def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
391
  step_content += f"πŸ” **Step {step.step_number}:** *In Progress...*\n\n"
392
 
393
  if hasattr(step, "thought") and step.thought:
394
- step_content += f"πŸ’­ **Thought:** {step.thought}\n\n"
395
 
396
  if hasattr(step, "action") and step.action:
397
- step_content += f"πŸ› οΈ **Action:** {step.action}\n\n"
398
 
399
  if hasattr(step, "observations") and step.observations:
400
  obs_text = str(step.observations)[:300]
@@ -403,7 +422,9 @@ def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
403
  step_content += f"πŸ‘οΈ **Observation:** {obs_text}\n\n"
404
 
405
  step_content += "⏳ *Processing next step...*"
406
- thinking_message["content"] = step_content
 
 
407
  new_history[-1] = thinking_message
408
  yield validate_message_history(new_history)
409
 
@@ -411,9 +432,10 @@ def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
411
  # If streaming fails, fall back to regular execution
412
  print(f"Streaming failed: {stream_error}, falling back to regular execution")
413
 
414
- thinking_message["content"] = (
415
- "⚑ **Agent Working** οΏ½οΏ½οΏ½οΏ½\n\nπŸ’« Processing your request using available tools...\n\n⏳ *Please wait...*"
416
- )
 
417
  new_history[-1] = thinking_message
418
  yield validate_message_history(new_history)
419
 
@@ -467,28 +489,35 @@ def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
467
  tool_usage_content = "Agent executed actions successfully"
468
 
469
  # Update thinking to show completion
470
- thinking_message["content"] = (
471
- "βœ… **Agent Complete** πŸŽ‰\n\nβœ… Request processed successfully\nβœ… Response prepared"
472
- )
 
473
  new_history[-1] = thinking_message
474
  yield validate_message_history(new_history)
475
 
476
  # Add tool usage message if there were tools used
477
  if tool_usage_content:
478
- tool_message = {"role": "assistant", "content": f"πŸ› οΈ **Tools & Actions Used**\n\n{tool_usage_content}"}
 
 
 
479
  new_history.append(tool_message)
480
  yield validate_message_history(new_history)
481
 
482
  # Add final response
483
  final_response = str(result) if result else "I couldn't process your request."
484
- final_message = {"role": "assistant", "content": final_response}
485
  new_history.append(final_message)
486
  yield validate_message_history(new_history)
487
  return
488
 
489
  # If we get here, streaming worked, so get the final result
490
  # The streaming should have shown all the steps, now get final answer
491
- thinking_message["content"] = "βœ… **Agent Complete** πŸŽ‰\n\nβœ… All steps executed\nβœ… Preparing final response"
 
 
 
492
  new_history[-1] = thinking_message
493
  yield validate_message_history(new_history)
494
 
@@ -500,7 +529,7 @@ def chat_with_agent(message: str, history: List) -> Generator[List, None, None]:
500
  if hasattr(last_step, "observations") and last_step.observations:
501
  final_response = str(last_step.observations)
502
 
503
- final_message = {"role": "assistant", "content": final_response}
504
  new_history.append(final_message)
505
  yield validate_message_history(new_history)
506
 
@@ -553,8 +582,8 @@ chat_interface = gr.ChatInterface(
553
  **⏱️ Processing Time Note:** Depending on the type of query, processing can take several seconds or minutes to complete.
554
  """,
555
  examples=[
 
556
  "πŸ”– Search my AI bookmarks",
557
- "πŸ“§ Show me my latest 5 emails",
558
  "πŸ€– Find emails about AI",
559
  "🌐 Search for latest AI news",
560
  "πŸ’Ž What AI resources do I have?",
@@ -566,7 +595,7 @@ chat_interface = gr.ChatInterface(
566
  "πŸš€ Show model releases bookmarks",
567
  "πŸ› οΈ Find tools and frameworks bookmarks",
568
  ],
569
- show_progress="hidden",
570
  )
571
 
572
  # Create categories and about interfaces
 
307
  return about_tab
308
 
309
 
310
def sanitize_content(content):
    """Sanitize message content down to a plain string.

    Gradio's ``messages``-format chat history requires ``content`` to be a
    string; agent steps may hand back dicts or rich objects instead.

    Args:
        content: Any value destined for a chat message's ``content`` field.

    Returns:
        ``content`` unchanged when it is already a ``str``; otherwise its
        ``str(...)`` representation.
    """
    # The original code special-cased dicts and objects with __dict__, but
    # every non-str branch returned str(content) — one check is equivalent.
    return content if isinstance(content, str) else str(content)
322
+
323
+
324
def validate_message_history(history):
    """Validate and return properly formatted message history.

    Rebuilds each entry as a flat ``{"role": str, "content": str}`` dict so
    downstream consumers (the Gradio chat component) never see nested or
    non-string payloads. Entries that are not dicts carrying both keys are
    dropped with a console warning.
    """
    validated = []
    for msg in history:
        # Guard clause: anything that is not a well-formed message dict is
        # reported and skipped rather than passed through.
        if not (isinstance(msg, dict) and "role" in msg and "content" in msg):
            print(f"Warning: Invalid message format detected: {msg}")
            continue
        # Rebuild from scratch — str() the role and sanitize the content —
        # to avoid carrying over any extra keys' nesting issues.
        validated.append(
            {"role": str(msg["role"]), "content": sanitize_content(msg["content"])}
        )
    return validated
 
352
  if isinstance(item, dict):
353
  # Already a dict, check if it has required keys
354
  if "role" in item and "content" in item:
355
+ # Ensure content is a simple string
356
+ content = sanitize_content(item["content"])
357
+ formatted_history.append({"role": str(item["role"]), "content": content})
358
  else:
359
  # Skip malformed dict items
360
  print(f"Warning: Skipping malformed history item: {item}")
361
  continue
362
  elif hasattr(item, "role") and hasattr(item, "content"):
363
+ # ChatMessage object - convert to dict with string content
364
+ content = sanitize_content(item.content)
365
+ formatted_history.append({"role": str(item.role), "content": content})
366
  elif isinstance(item, (list, tuple)) and len(item) == 2:
367
  # Legacy format: [user_message, assistant_message] or (user, assistant)
368
  # Convert to proper message format
369
  if isinstance(item[0], str) and isinstance(item[1], str):
370
+ formatted_history.append({"role": "user", "content": str(item[0])})
371
+ formatted_history.append({"role": "assistant", "content": str(item[1])})
372
  else:
373
  print(f"Warning: Skipping malformed history item: {item}")
374
  continue
 
410
  step_content += f"πŸ” **Step {step.step_number}:** *In Progress...*\n\n"
411
 
412
  if hasattr(step, "thought") and step.thought:
413
+ step_content += f"πŸ’­ **Thought:** {str(step.thought)}\n\n"
414
 
415
  if hasattr(step, "action") and step.action:
416
+ step_content += f"πŸ› οΈ **Action:** {str(step.action)}\n\n"
417
 
418
  if hasattr(step, "observations") and step.observations:
419
  obs_text = str(step.observations)[:300]
 
422
  step_content += f"πŸ‘οΈ **Observation:** {obs_text}\n\n"
423
 
424
  step_content += "⏳ *Processing next step...*"
425
+
426
+ # Ensure the content is a clean string
427
+ thinking_message = {"role": "assistant", "content": str(step_content)}
428
  new_history[-1] = thinking_message
429
  yield validate_message_history(new_history)
430
 
 
432
  # If streaming fails, fall back to regular execution
433
  print(f"Streaming failed: {stream_error}, falling back to regular execution")
434
 
435
+ thinking_message = {
436
+ "role": "assistant",
437
+ "content": "⚑ **Agent Working** πŸ”„\n\nπŸ’« Processing your request using available tools...\n\n⏳ *Please wait...*",
438
+ }
439
  new_history[-1] = thinking_message
440
  yield validate_message_history(new_history)
441
 
 
489
  tool_usage_content = "Agent executed actions successfully"
490
 
491
  # Update thinking to show completion
492
+ thinking_message = {
493
+ "role": "assistant",
494
+ "content": "βœ… **Agent Complete** πŸŽ‰\n\nβœ… Request processed successfully\nβœ… Response prepared",
495
+ }
496
  new_history[-1] = thinking_message
497
  yield validate_message_history(new_history)
498
 
499
  # Add tool usage message if there were tools used
500
  if tool_usage_content:
501
+ tool_message = {
502
+ "role": "assistant",
503
+ "content": f"πŸ› οΈ **Tools & Actions Used**\n\n{str(tool_usage_content)}",
504
+ }
505
  new_history.append(tool_message)
506
  yield validate_message_history(new_history)
507
 
508
  # Add final response
509
  final_response = str(result) if result else "I couldn't process your request."
510
+ final_message = {"role": "assistant", "content": str(final_response)}
511
  new_history.append(final_message)
512
  yield validate_message_history(new_history)
513
  return
514
 
515
  # If we get here, streaming worked, so get the final result
516
  # The streaming should have shown all the steps, now get final answer
517
+ thinking_message = {
518
+ "role": "assistant",
519
+ "content": "βœ… **Agent Complete** πŸŽ‰\n\nβœ… All steps executed\nβœ… Preparing final response",
520
+ }
521
  new_history[-1] = thinking_message
522
  yield validate_message_history(new_history)
523
 
 
529
  if hasattr(last_step, "observations") and last_step.observations:
530
  final_response = str(last_step.observations)
531
 
532
+ final_message = {"role": "assistant", "content": str(final_response)}
533
  new_history.append(final_message)
534
  yield validate_message_history(new_history)
535
 
 
582
  **⏱️ Processing Time Note:** Depending on the type of query, processing can take several seconds or minutes to complete.
583
  """,
584
  examples=[
585
+ "πŸ“§ Show me my latest 5 newsletters emails",
586
  "πŸ”– Search my AI bookmarks",
 
587
  "πŸ€– Find emails about AI",
588
  "🌐 Search for latest AI news",
589
  "πŸ’Ž What AI resources do I have?",
 
595
  "πŸš€ Show model releases bookmarks",
596
  "πŸ› οΈ Find tools and frameworks bookmarks",
597
  ],
598
+ show_progress="hidden"
599
  )
600
 
601
  # Create categories and about interfaces