import os
import gradio as gr
import requests
import pandas as pd
import re
import logging
from agent import initialize_agent # Import the agent initialization function
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Logging Configuration ---
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s")
logger = logging.getLogger(__name__)
# --- Global Agent Initialization ---
# The agent is initialized once when the Space starts up.
# This is critical for performance and to avoid reloading the model on every request.
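# NOTE (assumption about agent.py): initialize_agent() is expected to return a
# callable that takes a question string and returns the final, GAIA-normalized
# answer string (e.g. answer = AGENT("What is the capital of France?")), or
# None if initialization fails so the UI can report a clear startup error.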
logger.info("π Application starting up! Initializing the GAIA agent...")
AGENT = initialize_agent()
if AGENT is None:
logger.error("π₯ FATAL: Agent initialization failed. The application will not be able to process questions.")
else:
logger.info("β
Agent initialized successfully.")
# --- Helper Functions ---
def _fetch_questions(api_url: str) -> list:
"""Fetches evaluation questions from the API."""
questions_url = f"{api_url}/questions"
logger.info(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
raise ValueError("Fetched questions list is empty or invalid format.")
logger.info(f"Fetched {len(questions_data)} questions.")
return questions_data
except requests.exceptions.RequestException as e:
raise RuntimeError(f"Error fetching questions: {e}") from e
except requests.exceptions.JSONDecodeError as e:
raise RuntimeError(f"Error decoding JSON response from questions endpoint: {e}. Response: {response.text[:500]}") from e
except Exception as e:
raise RuntimeError(f"An unexpected error occurred fetching questions: {e}") from e
def _run_agent_on_questions(agent, questions_data: list) -> tuple[list, list]:
"""Runs the agent on each question and collects answers and logs."""
results_log = []
answers_payload = []
logger.info(f"Running agent on {len(questions_data)} questions...")
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
logger.warning(f"Skipping item with missing task_id or question: {item}")
continue
try:
logger.info(f"Processing task {task_id}: {question_text[:100]}...")
# The agent wrapper returns the final, normalized answer directly.
submitted_answer = agent(question_text)
logger.info(f"Task {task_id} - Final answer from agent: {submitted_answer}")
answers_payload.append({
"task_id": task_id,
"submitted_answer": submitted_answer
})
results_log.append({
"Task ID": task_id,
"Question": question_text,
"Final Answer": submitted_answer
})
except Exception as e:
error_msg = f"AGENT ERROR: {e}"
logger.error(f"Error running agent on task {task_id}: {e}", exc_info=True)
answers_payload.append({
"task_id": task_id,
"submitted_answer": error_msg
})
results_log.append({
"Task ID": task_id,
"Question": question_text,
"Final Answer": error_msg
})
return answers_payload, results_log
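# Note: per-task failures above are recorded as "AGENT ERROR: ..." answers instead
# of being re-raised, so a single failing question does not abort the whole run.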
def _submit_answers(api_url: str, username: str, agent_code_url: str, answers_payload: list) -> dict:
"""Submits the agent's answers to the evaluation API."""
submit_url = f"{api_url}/submit"
submission_data = {
"username": username.strip(),
"agent_code": agent_code_url,
"answers": answers_payload
}
logger.info(f"Submitting {len(answers_payload)} answers for user '{username}' to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
error_detail = f"Server responded with status {e.response.status_code}."
try:
error_json = e.response.json()
error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
except requests.exceptions.JSONDecodeError:
error_detail += f" Response: {e.response.text[:500]}"
raise RuntimeError(f"Submission Failed: {error_detail}") from e
except requests.exceptions.Timeout:
raise RuntimeError("Submission Failed: The request timed out.") from e
except requests.exceptions.RequestException as e:
raise RuntimeError(f"Submission Failed: Network error - {e}") from e
except Exception as e:
raise RuntimeError(f"An unexpected error occurred during submission: {e}") from e
# --- Main Gradio Function ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
"""
Orchestrates the fetching of questions, running the agent, and submitting answers.
"""
if not profile:
logger.warning("Attempted to run evaluation without being logged in.")
return "Please Login to Hugging Face with the button above.", None
username = profile.username
logger.info(f"User '{username}' initiated evaluation.")
if AGENT is None:
return "β Error: The agent failed to initialize on startup. Please check the Space logs for details.", None
space_id = os.getenv("SPACE_ID")
if not space_id:
logger.error("SPACE_ID environment variable not found. Cannot determine agent_code URL.")
return "β Error: SPACE_ID not set. This is required for submission.", None
agent_code_url = f"https://huggingface.co/spaces/{space_id}/tree/main"
status_message = ""
results_df = pd.DataFrame()
results_log = []
try:
# 1. Fetch Questions
questions_data = _fetch_questions(DEFAULT_API_URL)
# 2. Run Agent on Questions (using the pre-initialized global agent)
answers_payload, results_log = _run_agent_on_questions(AGENT, questions_data)
if not answers_payload:
status_message = "Agent did not produce any answers to submit."
return status_message, pd.DataFrame(results_log)
# 3. Submit Answers
submission_result = _submit_answers(DEFAULT_API_URL, username, agent_code_url, answers_payload)
final_status = (
f"π Submission Successful!\n"
f"π€ User: {submission_result.get('username')}\n"
f"π Overall Score: {submission_result.get('score', 'N/A')}% "
f"({submission_result.get('correct_count', '?')}/{submission_result.get('total_attempted', '?')} correct)\n"
f"π¬ Message: {submission_result.get('message', 'No message received.')}\n"
f"π Agent Code: {agent_code_url}"
)
status_message = final_status
results_df = pd.DataFrame(results_log)
except RuntimeError as e:
status_message = f"β Operation Failed: {e}"
logger.error(status_message)
results_df = pd.DataFrame(results_log) if results_log else pd.DataFrame([{"Status": "Error", "Details": str(e)}])
except Exception as e:
status_message = f"π₯ Critical Error: {e}"
logger.error(status_message, exc_info=True)
results_df = pd.DataFrame([{"Status": "Critical Error", "Details": str(e)}])
return status_message, results_df
# --- Gradio Interface Definition ---
with gr.Blocks(title="GAIA Benchmark Agent", theme=gr.themes.Soft()) as demo:
gr.Markdown("""
# π§ GAIA Benchmark Evaluation Agent
**An advanced agent designed to tackle the General AI Assistant (GAIA) benchmark.**
""")
gr.Markdown("""
## π Instructions:
1. **Add Secrets**: If you have cloned this Space, go to the **Settings** tab and add your API keys as **Secrets**.
* `TOGETHER_API_KEY`: Your key from Together AI.
* `SERPAPI_API_KEY`: Your key from SerpApi for Google Search (optional but recommended).
2. **Login**: Use the button below to log in with your Hugging Face account. Your username is required for submission.
3. **Run**: Click 'Run Evaluation & Submit' to start the process. The agent will fetch all questions, solve them, and submit the answers automatically.
4. **Wait**: The process can take several minutes. You can monitor the progress in the status box and see detailed results in the table below.
---
### π― GAIA Answer Formatting
The agent is designed to automatically format answers according to GAIA's strict requirements (e.g., no commas in numbers, no articles in strings).
""")
    with gr.Row():
        gr.LoginButton(scale=1)
        run_button = gr.Button("🚀 Run Evaluation & Submit All Answers", variant="primary", scale=2)
    status_output = gr.Textbox(
        label="📊 Evaluation Status & Results",
        lines=8,
        interactive=False,
        placeholder="Click 'Run Evaluation' to start the process..."
    )
    results_table = gr.DataFrame(
        label="📋 Detailed Question Results",
        wrap=True,
        interactive=False,
        column_widths=["10%", "60%", "30%"]
    )
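    # No `inputs` are passed to .click() below on purpose: Gradio sees the
    # gr.OAuthProfile | None type hint on run_and_submit_all and injects the
    # logged-in user's profile (or None) automatically.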
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
if __name__ == "__main__":
print("\n" + "="*70)
print("π GAIA BENCHMARK AGENT STARTING UP")
print("="*70)
# Check environment variables loaded from HF Secrets
space_id = os.getenv("SPACE_ID")
together_key = os.getenv("TOGETHER_API_KEY")
serpapi_key = os.getenv("SERPAPI_API_KEY")
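    # For local runs outside a Space, these can be supplied as ordinary
    # environment variables (illustrative):
    #   TOGETHER_API_KEY=... SERPAPI_API_KEY=... python app.py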
    if space_id:
        print(f"✅ SPACE_ID: {space_id}")
        print(f"   - Submission URL will be: https://huggingface.co/spaces/{space_id}")
    else:
        print("⚠️ SPACE_ID not found - submissions will fail. This is normal for local dev.")
    print("🔑 API Keys Status (from Secrets):")
    print(f"   - Together AI: {'✅ Set' if together_key else '❌ Missing - Agent will fail to initialize!'}")
    print(f"   - SerpAPI: {'✅ Set' if serpapi_key else '⚠️ Missing - Google Search tool will be disabled.'}")
    if not together_key:
        print("\n‼️ CRITICAL: TOGETHER_API_KEY is not set in the Space Secrets.")
        print("   Please add it in the 'Settings' tab of your Space.")
    print("="*70)
    print("🎯 Launching Gradio Interface...")
    print("="*70 + "\n")
    demo.launch(debug=False, share=False)