import os
import gradio as gr
import requests
import pandas as pd
import asyncio
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel
from huggingface_hub import login
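
# Gradio app for the Hugging Face Agents Course (Unit 4): runs a smolagents
# CodeAgent with DuckDuckGo search over the scoring API's questions and submits
# the answers. Requires a HUGGINGFACEHUB_API_TOKEN environment variable.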
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

login(token=os.environ["HUGGINGFACEHUB_API_TOKEN"])

search_tool = DuckDuckGoSearchTool()

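# Async generator: yields (status, results table, progress log) updates to the UI as tasks complete.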
async def run_and_submit_all(profile: gr.OAuthProfile | None):
    log_output = ""

    try:
        agent = CodeAgent(
            tools=[search_tool],
            model=HfApiModel(model_id="MiniMaxAI/MiniMax-M1-80k"),
            max_steps=5,
            verbosity_level=2,
        )
    except Exception as e:
        yield f"Error initializing agent: {e}", None, log_output
        return

    space_id = os.getenv("SPACE_ID")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

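    # Fetch the question set from the scoring API.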
    questions_url = f"{DEFAULT_API_URL}/questions"
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()[:5]  # only the first 5 questions are processed
        if not questions_data:
            yield "Fetched questions list is empty or invalid format.", None, log_output
            return
    except Exception as e:
        yield f"Error fetching questions: {e}", None, log_output
        return

    results_log = []
    answers_payload = []
    loop = asyncio.get_running_loop()

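    # Solve each question sequentially, streaming progress to the UI after every task.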
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue

        log_output += f"πŸ” Solving Task ID: {task_id}...\n"
        yield None, None, log_output

        try:
            system_prompt = (
                "You are a general AI assistant. I will ask you a question. "
                "Report your thoughts, and finish your answer with the following template: "
                "FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. "
                "If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. "
                "If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. "
                "If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.\n\n"
            )
            full_prompt = system_prompt + f"Question: {question_text.strip()}"

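            # Run the synchronous agent call in a worker thread so the event loop is not blocked.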
            agent_result = await loop.run_in_executor(None, agent, full_prompt)

            # Extract final answer cleanly
            if isinstance(agent_result, dict) and "final_answer" in agent_result:
                final_answer = str(agent_result["final_answer"]).strip()
            elif isinstance(agent_result, str):
                response_text = agent_result.strip()

                # Remove known boilerplate
                if "Here is the final answer from your managed agent" in response_text:
                    response_text = response_text.split(":", 1)[-1].strip()

                if "FINAL ANSWER:" in response_text:
                    _, final_answer = response_text.rsplit("FINAL ANSWER:", 1)
                    final_answer = final_answer.strip()
                else:
                    final_answer = response_text
            else:
                final_answer = str(agent_result).strip()

            answers_payload.append({
                "task_id": task_id,
                "submitted_answer": final_answer
            })

            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": final_answer
            })

            log_output += f"βœ… Done: {task_id} β€” Answer: {final_answer[:60]}\n"
            yield None, None, log_output

        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": f"AGENT ERROR: {e}"
            })
            log_output += f"⛔️ Error: {task_id} β€” {e}\n"
            yield None, None, log_output

    if not answers_payload:
        yield "Agent did not produce any answers to submit.", pd.DataFrame(results_log), log_output
        return

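    # Build the submission payload expected by the scoring endpoint and POST it.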
    username = profile.username if profile else "unknown"
    submit_url = f"{DEFAULT_API_URL}/submit"
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        yield final_status, results_df, log_output
    except Exception as e:
        status_message = f"Submission Failed: {e}"
        results_df = pd.DataFrame(results_log)
        yield status_message, results_df, log_output

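# --- Gradio UI: login, run button, status box, results table, and live progress log ---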
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown("""
    **Instructions:**
    1. Clone this space and define your agent logic.
    2. Log in to your Hugging Face account.
    3. Click 'Run Evaluation & Submit All Answers'.
    ---
    **Note:**
    The run may take several minutes. The agent runs asynchronously, so progress streams into the log below while questions are being solved.
    """)

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    progress_log = gr.Textbox(label="Progress Log", lines=10, interactive=False)

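    # Gradio injects gr.OAuthProfile from the login state based on the type hint, so no inputs are wired here.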
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table, progress_log])

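# Startup diagnostics: print the Space host and ID when running on Hugging Face Spaces.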
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"βœ… SPACE_HOST: https://{space_host_startup}.hf.space")
    if space_id_startup:
        print(f"βœ… SPACE_ID: https://huggingface.co/spaces/{space_id_startup}")

    print("Launching Gradio Interface...")
    demo.launch(debug=True, share=False)