import os
import gradio as gr
import requests
import inspect
import pandas as pd
import re
import json
from openai import OpenAI
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
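
# BasicAgent wraps an OpenAI chat model with two local tools (web search and a
# Wikipedia lookup). The model decides via function calling which tool to use,
# each tool result is appended back into the conversation, and the final answer
# is read out of the \boxed{...} marker in the model's last reply.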
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY environment variable not found.")
        self.client = OpenAI(api_key=api_key)
        self.model = "gpt-4o"  # Best for function calling

        # Define available tools
        self.tools = [
            {
                "type": "function",
                "function": {
                    "name": "web_search",
                    "description": "Search the web for current information on any topic",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "Search query"
                            }
                        },
                        "required": ["query"]
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "wikipedia_search",
                    "description": "Get factual information from Wikipedia",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "topic": {
                                "type": "string",
                                "description": "Topic to search on Wikipedia"
                            }
                        },
                        "required": ["topic"]
                    }
                }
            }
        ]

    def web_search(self, query: str) -> str:
        """Search using DuckDuckGo API"""
        try:
            url = "https://api.duckduckgo.com/"
            params = {'q': query, 'format': 'json', 'no_html': '1'}
            response = requests.get(url, params=params, timeout=10)
            data = response.json()
            result = ""
            if data.get('AbstractText'):
                result += f"Summary: {data['AbstractText']}\n"
            if data.get('RelatedTopics'):
                for topic in data['RelatedTopics'][:3]:
                    if isinstance(topic, dict) and 'Text' in topic:
                        result += f"- {topic['Text']}\n"
            return result or f"No results found for: {query}"
        except Exception as e:
            return f"Search error: {str(e)}"

    def wikipedia_search(self, topic: str) -> str:
        """Get Wikipedia summary"""
        try:
            url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic.replace(' ', '_')}"
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                data = response.json()
                if 'extract' in data:
                    return f"Wikipedia: {data['extract'][:800]}..."
            return f"No Wikipedia entry found for: {topic}"
        except Exception as e:
            return f"Wikipedia error: {str(e)}"

    def execute_function(self, name: str, arguments: dict) -> str:
        """Execute the requested function"""
        if name == "web_search":
            return self.web_search(arguments.get("query", ""))
        elif name == "wikipedia_search":
            return self.wikipedia_search(arguments.get("topic", ""))
        return f"Unknown function: {name}"

    def extract_boxed_answer(self, text: str) -> str:
        """Extract answer from \\boxed{} or \\text{}"""
        # Try boxed first
        boxed_pattern = r'\\boxed\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}'
        matches = re.findall(boxed_pattern, text)
        if matches:
            return matches[-1].strip()

        # Try text
        text_pattern = r'\\text\{([^{}]*(?:\{[^{}]*\}[^{}]*)*)\}'
        matches = re.findall(text_pattern, text)
        if matches:
            return matches[-1].strip()

        # Fallback patterns
        fallback_patterns = [
            r'(?:final answer|answer):\s*(.+?)(?:\n|$)',
            r'(?:the answer is):\s*(.+?)(?:\n|$)',
        ]
        for pattern in fallback_patterns:
            matches = re.findall(pattern, text, re.IGNORECASE)
            if matches:
                return matches[-1].strip()

        # Clean up any LaTeX and return
        final_text = text.strip()
        final_text = re.sub(r'\\text\{([^{}]*)\}', r'\1', final_text)
        final_text = re.sub(r'\\boxed\{([^{}]*)\}', r'\1', final_text)
        return final_text.strip()

    def __call__(self, question: str) -> str:
        print(f"Processing question: {question[:50]}...")
        try:
            system_prompt = """You are an expert problem solver with access to search tools.
For questions requiring current info, facts, or research, use the available tools first.
Think step by step, then provide your final answer.
CRITICAL: End with your final answer in this format: \\boxed{your_answer}
Examples:
- "Based on my search, the answer is 42. \\boxed{42}"
- "According to Wikipedia, it's Paris. \\boxed{Paris}"
Only use \\boxed{} for your final answer."""

            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": question}
            ]

            final_response = ""  # fallback in case the loop exhausts its iterations
            # Allow up to 3 tool-calling rounds to prevent infinite loops
            for iteration in range(3):
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    tools=self.tools,
                    tool_choice="auto",
                    max_tokens=1500,
                    temperature=0.1
                )
                response_message = response.choices[0].message
                # Append the assistant message as-is so any tool_calls are preserved
                messages.append(response_message)

                # If there are no tool calls, the model has produced its answer
                if not response_message.tool_calls:
                    final_response = response_message.content or ""
                    break

                # Execute each tool call and feed the result back to the model
                for tool_call in response_message.tool_calls:
                    function_name = tool_call.function.name
                    function_args = json.loads(tool_call.function.arguments)
                    print(f"Calling {function_name} with {function_args}")
                    result = self.execute_function(function_name, function_args)
                    messages.append({
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": result
                    })

            final_answer = self.extract_boxed_answer(final_response)
            print(f"Final answer: {final_answer}")
            return final_answer
        except Exception as e:
            return f"Error: {str(e)}"
# [Rest of the code remains the same - just the agent class changed]
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetches questions, runs agent, submits answers"""
    space_id = os.getenv("SPACE_ID")
    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        return "Please Login to Hugging Face.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent
    try:
        agent = BasicAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    # 2. Fetch Questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # 3. Run Agent
    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"ERROR: {e}"})

    # 4. Submit
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')})\n"
            f"Message: {result_data.get('message', 'No message')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"Submission failed: {e}", pd.DataFrame(results_log)
# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# AI Agent with Tool Calling")
    gr.Markdown("""
    **Features:**
    - Web search via DuckDuckGo
    - Wikipedia lookup
    - Smart tool selection by GPT-4o
    - Robust answer extraction

    **Setup:** Add OPENAI_API_KEY to the repository secrets.
    """)
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit")
    status_output = gr.Textbox(label="Status", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Results", wrap=True)
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    print("Starting AI Agent with Tool Calling...")
    demo.launch(debug=True, share=False)