Spaces:
Sleeping
Sleeping
volker
committed on
Commit
·
86676cc
1
Parent(s):
a9491cd
Update prompt; fix requirements.
Browse files
- app.py +7 -5
- basic_agent.py +11 -10
- requirements.txt +2 -1
app.py
CHANGED
@@ -152,7 +152,7 @@ def fetch_and_run_single(selected_id, questions_data, profile: gr.OAuthProfile |
|
|
152 |
print(f"User logged in: {profile.username}")
|
153 |
else:
|
154 |
print("User not logged in.")
|
155 |
-
return "Please Login to Hugging Face with the button.", None, None
|
156 |
|
157 |
try:
|
158 |
index = int(selected_id)
|
@@ -160,9 +160,9 @@ def fetch_and_run_single(selected_id, questions_data, profile: gr.OAuthProfile |
|
|
160 |
task_id = question_item.get("task_id")
|
161 |
question_text = question_item.get("question")
|
162 |
if not task_id or question_text is None:
|
163 |
-
return "Invalid question format received.", None, None
|
164 |
except Exception as e:
|
165 |
-
return f"Error selecting question: {e}", None, None
|
166 |
|
167 |
agent = init_agent()
|
168 |
generated_answer = agent(question_item)
|
@@ -171,7 +171,8 @@ def fetch_and_run_single(selected_id, questions_data, profile: gr.OAuthProfile |
|
|
171 |
"Question": question_text,
|
172 |
"Generated Answer": generated_answer
|
173 |
}])
|
174 |
-
|
|
|
175 |
|
176 |
|
177 |
# --- Build Gradio Interface using Blocks ---
|
@@ -230,11 +231,12 @@ with (gr.Blocks() as demo):
|
|
230 |
single_question_json = gr.JSON(label="Raw Question JSON")
|
231 |
single_status = gr.Textbox(label="Single Question Status", lines=2, interactive=False)
|
232 |
single_result_table = gr.DataFrame(label="Single Question and Answer", wrap=True)
|
|
|
233 |
|
234 |
single_run_button.click(
|
235 |
fn=fetch_and_run_single,
|
236 |
inputs=[question_id_dropdown, questions_data_state],
|
237 |
-
outputs=[single_status, single_result_table, single_question_json]
|
238 |
)
|
239 |
|
240 |
# All questions for submission run
|
|
|
152 |
print(f"User logged in: {profile.username}")
|
153 |
else:
|
154 |
print("User not logged in.")
|
155 |
+
return "Please Login to Hugging Face with the button.", None, None, []
|
156 |
|
157 |
try:
|
158 |
index = int(selected_id)
|
|
|
160 |
task_id = question_item.get("task_id")
|
161 |
question_text = question_item.get("question")
|
162 |
if not task_id or question_text is None:
|
163 |
+
return "Invalid question format received.", None, None, []
|
164 |
except Exception as e:
|
165 |
+
return f"Error selecting question: {e}", None, None, []
|
166 |
|
167 |
agent = init_agent()
|
168 |
generated_answer = agent(question_item)
|
|
|
171 |
"Question": question_text,
|
172 |
"Generated Answer": generated_answer
|
173 |
}])
|
174 |
+
logs = agent.get_logs()
|
175 |
+
return "Fetched and ran agent on selected question.", result_df, question_item, logs
|
176 |
|
177 |
|
178 |
# --- Build Gradio Interface using Blocks ---
|
|
|
231 |
single_question_json = gr.JSON(label="Raw Question JSON")
|
232 |
single_status = gr.Textbox(label="Single Question Status", lines=2, interactive=False)
|
233 |
single_result_table = gr.DataFrame(label="Single Question and Answer", wrap=True)
|
234 |
+
single_question_logs = gr.JSON(label="Agent logs")
|
235 |
|
236 |
single_run_button.click(
|
237 |
fn=fetch_and_run_single,
|
238 |
inputs=[question_id_dropdown, questions_data_state],
|
239 |
+
outputs=[single_status, single_result_table, single_question_json, single_question_logs]
|
240 |
)
|
241 |
|
242 |
# All questions for submission run
|
basic_agent.py
CHANGED
@@ -71,19 +71,20 @@ class BasicSmolAgent:
|
|
71 |
if not model:
|
72 |
model = HfApiModel()
|
73 |
search_tool = DuckDuckGoSearchToolWH()
|
74 |
-
self.agent = CodeAgent(tools=[search_tool], model=model)
|
75 |
-
self.prompt = ("
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
" make sure you don’t include the text “FINAL ANSWER” in your submission, just reply with the answer and nothing else."
|
83 |
-
" The question is the following: {}")
|
84 |
# Load the Whisper pipeline
|
85 |
self.mp3_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base")
|
86 |
|
|
|
|
|
|
|
87 |
def __call__(self, question_item: dict) -> str:
|
88 |
task_id = question_item.get("task_id")
|
89 |
question_text = question_item.get("question")
|
|
|
71 |
if not model:
|
72 |
model = HfApiModel()
|
73 |
search_tool = DuckDuckGoSearchToolWH()
|
74 |
+
self.agent = CodeAgent(tools=[search_tool], model=model, max_steps=10)
|
75 |
+
self.prompt = ("The question is the following:\n ```{}```"
|
76 |
+
" YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings."
|
77 |
+
" If you are asked for a number, don't use comma to write your number neither use units"
|
78 |
+
" such as $ or percent sign unless specified otherwise. If you are asked for a string,"
|
79 |
+
" don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise."
|
80 |
+
" If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."
|
81 |
+
)
|
|
|
|
|
82 |
# Load the Whisper pipeline
|
83 |
self.mp3_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base")
|
84 |
|
85 |
+
def get_logs(self):
|
86 |
+
return self.agent.memory.steps
|
87 |
+
|
88 |
def __call__(self, question_item: dict) -> str:
|
89 |
task_id = question_item.get("task_id")
|
90 |
question_text = question_item.get("question")
|
requirements.txt
CHANGED
@@ -5,4 +5,5 @@ python-dotenv
|
|
5 |
pandas
|
6 |
duckduckgo_search
|
7 |
smolagents
|
8 |
-
transformers
|
|
|
|
5 |
pandas
|
6 |
duckduckgo_search
|
7 |
smolagents
|
8 |
+
transformers
|
9 |
+
torch
|