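"""Asynchronous analysis of agent execution traces for USACO tasks.

Groups Weave calls by task, optionally asks an LLM to evaluate each step,
and produces a structured per-task summary.
"""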
import asyncio
import json
from collections import defaultdict

from pydantic import BaseModel
class StepAnalysis(BaseModel):
    description: str
    action_type: str
    assessment: str
    success: bool
    headline: str


class TaskSummary(BaseModel):
    overview: str
    key_successes: str
    main_challenges: str
    overall_assessment: str
async def analyze_agent_steps(processed_calls, llm_client, llm_eval=False):
    """Group processed calls by task, order them chronologically, and analyze each task."""
    task_calls = defaultdict(list)
    for call in processed_calls:
        task_calls[call['weave_task_id']].append(call)

    # Sort each task's calls by timestamp, treating missing timestamps as 0
    for task_id in task_calls:
        for call in task_calls[task_id]:
            if call['created_timestamp'] is None:
                call['created_timestamp'] = 0
        task_calls[task_id].sort(key=lambda x: x['created_timestamp'])

    # Analyze all tasks concurrently
    tasks = [analyze_task(calls, llm_client, llm_eval) for calls in task_calls.values()]
    task_analyses = await asyncio.gather(*tasks)
    return dict(zip(task_calls.keys(), task_analyses))
async def analyze_task(calls, llm_client, llm_eval=False):
    """Analyze a single task's calls, optionally using the LLM for per-step evaluation."""
    if llm_eval:
        # Analyze all steps of the task concurrently
        step_tasks = [analyze_step(call, i + 1, len(calls), llm_client) for i, call in enumerate(calls)]
        steps = await asyncio.gather(*step_tasks)
    else:
        # Without LLM evaluation, attach placeholder analyses to each call
        steps = []
        for call in calls:
            steps.append({
                'call_data': call,
                'analysis': dict(StepAnalysis(
                    description="Not available",
                    action_type='other',
                    success=False,
                    assessment="Not available",
                    headline="Not available"
                ))
            })
    try:
        if llm_eval:
            task_analysis = await summarize_task(steps, llm_client)
            return {
                'steps': steps,
                'task_analysis': task_analysis
            }
        else:
            return {
                'steps': steps,
                'task_analysis': dict(TaskSummary(
                    overview="Not available",
                    key_successes="Not available",
                    main_challenges="Not available",
                    overall_assessment="Not available"
                ))
            }
    except Exception as e:
        print(f"Error in task summarization: {str(e)}")
        # Fall back to a placeholder summary while keeping the same return shape
        return {
            'steps': steps,
            'task_analysis': dict(TaskSummary(
                overview="Not available",
                key_successes="Not available",
                main_challenges="Not available",
                overall_assessment="Not available"
            ))
        }
async def analyze_step(call, step_number, total_steps, llm_client):
    """Ask the LLM for a structured analysis of a single agent step."""
    prompt = f"""
    Analyze Step {step_number}/{total_steps} of the AI agent's USACO task solution:

    Input: {call['inputs']}
    Output: {call['outputs']}
    Exception: {call['exception']}
    Summary: {call['summary']}

    Provide a detailed, technical analysis with the following:
    1. Specific Description: Describe precisely what the agent did in this step, including any algorithms, data structures, or problem-solving techniques employed.
    2. Action Classification: Categorize the action as one of:
       - 'plan': Strategizing or outlining an approach
       - 'tool': Using a specific programming construct or algorithm
       - 'retrieve': Accessing or utilizing external information
       - 'other': Any action that doesn't fit the above categories
    3. Technical Evaluation: Assess the technical merit of the agent's approach. Comment on efficiency, correctness, and adherence to USACO problem-solving best practices.
    4. Success: Determine if the agent successfully completed its intended action.
    5. Concise Headline: Write a technically precise headline (max 7 words) that captures the essence of this step.

    Your analysis should be highly specific to this task. Avoid generalities and focus on the technical details of the agent's approach to this particular problem.
    """
    system_message = "You are an expert in AI agent design and evaluation. Analyze the AI agent's actions with the depth and specificity expected in a detailed expert review. Focus on providing insights that would be valuable to an AI researcher specializing in AI agent development."
    analysis = await llm_client.generate_text(prompt, system_message, response_format=StepAnalysis)
    try:
        analysis = json.loads(analysis)
    except json.JSONDecodeError as e:
        print(f"Error parsing analysis for step {step_number} of {total_steps} in task {call['weave_task_id']}: {str(e)}. Using default values.")
        analysis = dict(StepAnalysis(
            description="Analysis failed",
            action_type='other',
            success=False,
            assessment="Unable to assess due to error",
            headline="Analysis failed"
        ))
    return {
        'call_data': call,
        'analysis': analysis
    }
async def summarize_task(steps, llm_client):
    """Summarize a full task from its per-step analyses."""
    # Concatenate the per-step analyses into a single prompt context
    steps_summary = "\n".join([f"Step {i+1}: {step['analysis']}" for i, step in enumerate(steps)])

    prompt = f"""
    Provide a comprehensive analysis of the AI agent's approach to solving this USACO task:

    {steps_summary}

    Your analysis should include:
    1. Technical Overview: Describe the agent's overall problem-solving strategy, highlighting specific actions and techniques used throughout the task.
    2. Key Achievements: Identify and explain the most significant breakthroughs or efficient implementations demonstrated by the agent.
    3. Technical Challenges: Analyze the primary obstacles encountered, focusing on difficulties or conceptual misunderstandings in the context of the task.
    4. Performance Evaluation: Assess the agent's overall performance, considering factors such as time complexity, space efficiency, code quality, and adherence to competitive programming best practices.

    Your summary should be highly technical and specific to this task. Assume the reader is an expert and familiar with the task context. Focus on providing insights that would be valuable to an AI researcher specializing in AI agent development.
    """
    system_message = "You are an expert AI performance analyst, skilled in evaluating and summarizing AI agent task execution. You specialize in providing analyses that support AI researchers in developing AI agents."

    analysis = await llm_client.generate_text(prompt, system_message, response_format=TaskSummary)
    return json.loads(analysis)
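

# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal example of how analyze_agent_steps might be driven. The
# DummyLLMClient and the single record in example_calls below are hypothetical
# stand-ins, not part of the pipeline above; a real run would pass an LLM
# client exposing generate_text(prompt, system_message, response_format=...)
# and calls exported from Weave.
if __name__ == "__main__":
    class DummyLLMClient:
        async def generate_text(self, prompt, system_message, response_format=None):
            # Return a JSON string matching the requested response format
            if response_format is StepAnalysis:
                return json.dumps(dict(StepAnalysis(
                    description="Stub step", action_type='other', assessment="Stub",
                    success=True, headline="Stub step")))
            return json.dumps(dict(TaskSummary(
                overview="Stub", key_successes="Stub",
                main_challenges="Stub", overall_assessment="Stub")))

    example_calls = [{
        'weave_task_id': 'usaco_example',
        'created_timestamp': 1,
        'inputs': {'problem': '...'},
        'outputs': {'code': '...'},
        'exception': None,
        'summary': {'tokens': 0},
    }]

    results = asyncio.run(analyze_agent_steps(example_calls, DummyLLMClient(), llm_eval=True))
    print(json.dumps(results['usaco_example']['task_analysis'], indent=2))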