mjschock committed on
Commit c43be1d · unverified · 1 Parent(s): 5c0be56

Integrate telemetry support in main.py by registering the SmolagentsInstrumentor for enhanced monitoring. Update the main function to simplify question enhancement instructions and improve clarity. Modify extract_final_answer utility to prioritize 'final_answer_text' in results. Update requirements.txt to include telemetry dependencies for smolagents.

Files changed (3)
  1. main.py +10 -26
  2. requirements.txt +1 -1
  3. utils.py +4 -0
main.py CHANGED
@@ -19,8 +19,12 @@ from agents import create_data_analysis_agent, create_media_agent, create_web_agent
 from prompts import MANAGER_SYSTEM_PROMPT
 from tools import perform_calculation, web_search
 from utils import extract_final_answer
+from phoenix.otel import register
+from openinference.instrumentation.smolagents import SmolagentsInstrumentor
 
 litellm._turn_on_debug()
+register()
+SmolagentsInstrumentor().instrument()
 
 # Configure logging
 logging.basicConfig(
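
The two added calls wire smolagents runs into an OpenTelemetry backend: register() configures a tracer provider (by default pointing at a local Phoenix collector) and SmolagentsInstrumentor().instrument() patches smolagents so each agent step is traced. A minimal sketch of a more explicit setup is shown below; the project name and endpoint are illustrative assumptions, not values from this commit.

# Sketch only: explicit telemetry setup, assuming a Phoenix instance at the default local address.
from phoenix.otel import register
from openinference.instrumentation.smolagents import SmolagentsInstrumentor

tracer_provider = register(
    project_name="gaia-agent",                   # illustrative project name
    endpoint="http://localhost:6006/v1/traces",  # default local Phoenix OTLP/HTTP endpoint
)
SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider)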
@@ -280,22 +284,14 @@ async def run_with_streaming(task: str, thread_id: str) -> dict:
 
 
 def main(task: str, thread_id: str = str(uuid.uuid4())):
-    # Enhance the question with instructions specific to GAIA tasks
+    # Enhance the question with minimal instructions
     enhanced_question = f"""
-    GAIA Benchmark Question: {task}
+    GAIA Question: {task}
 
-    This is a multi-step reasoning problem from the GAIA benchmark. Please solve it by:
-
-    1. Breaking the question down into clear logical steps
-    2. Using the appropriate specialized agents when needed:
-       - web_agent for web searches and browsing
-       - data_agent for data analysis and calculations
-       - media_agent for working with images and PDFs
-    3. Tracking your progress through the problem
-    4. Providing your final answer in EXACTLY the format requested by the question
-
-    IMPORTANT: GAIA questions often involve multiple steps of information gathering and reasoning.
-    You must follow the chain of reasoning completely and provide the exact format requested.
+    Please solve this multi-step reasoning problem by:
+    1. Breaking it down into logical steps
+    2. Using specialized agents when needed
+    3. Providing the final answer in the exact format requested
     """
 
     logger.info(
@@ -304,18 +300,6 @@ def main(task: str, thread_id: str = str(uuid.uuid4())):
     result = asyncio.run(run_with_streaming(enhanced_question, thread_id))
     logger.info("Agent run finished.")
 
-    # Print final results
-    # print("\n--- Execution Results ---")
-    # print(f"Number of Steps: {len(result.get('steps', []))}")
-    # # Optionally print step details
-    # # for i, step in enumerate(result.get('steps', [])):
-    # # print(f"Step {i+1} Details: {step}")
-    # print(f"Final Answer: {result.get('final_answer') or 'Not found'}")
-    # if err := result.get("error"):
-    # print(f"Error: {err}")
-
-    # return result.get("final_answer")
-
     logger.info(f"Result: {result}")
     return extract_final_answer(result)
 
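
For orientation only (not part of the diff), the entry point can be called directly once the module imports succeed; the question string and thread id below are illustrative.

# Sketch only: invoking the entry point defined in main.py with made-up arguments.
from main import main

answer = main(
    "What is the capital of France?",  # illustrative question, not a real GAIA task
    thread_id="demo-thread-1",         # optional; defaults to a fresh uuid4 when omitted
)
print(answer)  # whatever extract_final_answer() pulled out of the run result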
 
requirements.txt CHANGED
@@ -8,6 +8,6 @@ pytest>=8.3.5
 pytest-cov>=6.1.1
 python-dotenv>=1.0.0
 requests>=2.32.3
-smolagents[litellm]>=0.1.3
+smolagents[litellm,telemetry]>=0.1.3
 typing-extensions>=4.5.0
 wikipedia-api>=0.8.1
utils.py CHANGED
@@ -15,6 +15,10 @@ def extract_final_answer(result: Union[str, dict]) -> str:
     """
     # Handle dictionary input
     if isinstance(result, dict):
+        # Check for final_answer_text first (from agent output)
+        if "final_answer_text" in result:
+            return str(result["final_answer_text"])
+        # Fall back to final_answer key
         if "final_answer" in result:
            return str(result["final_answer"])
        return "No final answer found in result"
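
The new lookup order matters when both keys are present: final_answer_text (from agent output) now takes precedence over final_answer. A quick illustrative check, assuming utils.py is importable:

# Sketch only: demonstrating the key precedence after this change.
from utils import extract_final_answer

print(extract_final_answer({"final_answer_text": "Paris", "final_answer": "paris"}))  # -> "Paris"
print(extract_final_answer({"final_answer": "42"}))                                   # -> "42"
print(extract_final_answer({}))                                                       # -> "No final answer found in result"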