Update app.py
Browse files
app.py
CHANGED
@@ -6,7 +6,8 @@ import pandas as pd
|
|
6 |
from google import genai
|
7 |
from google.genai import types
|
8 |
import time
|
9 |
-
from smolagents import CodeAgent,
|
|
|
10 |
|
11 |
# (Keep Constants as is)
|
12 |
# --- Constants ---
|
@@ -153,59 +154,79 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
|
153 |
|
154 |
|
155 |
|
156 |
-
# Setup Gemini Client
|
157 |
-
api_key = os.getenv("GEMINI_API_KEY")
|
158 |
-
if not api_key:
|
159 |
-
|
160 |
-
os.environ["GOOGLE_API_KEY"] = api_key
|
161 |
-
client = genai.Client()
|
162 |
-
model_id = "gemini-2.0-flash-exp"
|
163 |
-
|
164 |
-
generation_config = types.GenerateContentConfig(
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
)
|
173 |
-
|
|
|
|
|
|
|
|
|
174 |
# Define the real agent
|
175 |
class BasicAgent:
|
176 |
def __init__(self):
|
177 |
-
print("
|
178 |
|
179 |
-
|
|
|
|
|
|
|
|
|
180 |
|
|
|
181 |
self.agent = CodeAgent(
|
182 |
-
tools=[
|
183 |
-
|
184 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
185 |
)
|
186 |
|
187 |
-
def __call__(self,
|
188 |
-
"""
|
189 |
-
try:
|
190 |
-
response = client.models.generate_content(
|
191 |
-
model=model_id,
|
192 |
-
contents=[{"role": "user", "parts": [{"text": prompt}]}],
|
193 |
-
config=generation_config
|
194 |
-
)
|
195 |
-
answer = response.text
|
196 |
-
time.sleep(7)
|
197 |
-
return answer
|
198 |
-
except Exception as e:
|
199 |
-
return f"Error during Gemini call: {str(e)}"
|
200 |
-
|
201 |
-
def run_agent(self, question: str) -> str:
|
202 |
-
"""Run the agent on a question."""
|
203 |
print(f"Running agent for task: {question[:50]}...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
204 |
try:
|
205 |
-
result = self.agent.run(
|
206 |
-
return result
|
207 |
except Exception as e:
|
208 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
209 |
|
210 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
211 |
"""
|
|
|
6 |
from google import genai
|
7 |
from google.genai import types
|
8 |
import time
|
9 |
+
from smolagents import CodeAgent, DuckDuckGoSearchTool, WikipediaSearchTool, PythonInterpreterTool, FinalAnswerTool, LiteLLMModel, tool
|
10 |
+
|
11 |
|
12 |
# (Keep Constants as is)
|
13 |
# --- Constants ---
|
|
|
154 |
|
155 |
|
156 |
|
157 |
+
# # Setup Gemini Client
|
158 |
+
# api_key = os.getenv("GEMINI_API_KEY")
|
159 |
+
# if not api_key:
|
160 |
+
# raise ValueError("GEMINI_API_KEY is missing.")
|
161 |
+
# os.environ["GOOGLE_API_KEY"] = api_key
|
162 |
+
# client = genai.Client()
|
163 |
+
# model_id = "gemini-2.0-flash-exp"
|
164 |
+
|
165 |
+
# generation_config = types.GenerateContentConfig(
|
166 |
+
# temperature=0.4,
|
167 |
+
# top_p=0.9,
|
168 |
+
# top_k=40,
|
169 |
+
# candidate_count=1,
|
170 |
+
# seed=42,
|
171 |
+
# presence_penalty=0.0,
|
172 |
+
# frequency_penalty=0.0,
|
173 |
+
# )
|
174 |
+
|
175 |
+
@tool
def reverse_string(input_string: str) -> str:
    """Reverse the characters of a string.

    The smolagents ``@tool`` decorator parses this docstring to build the
    tool schema, so every argument must be described here — without it,
    tool creation raises a docstring-parsing error at import time.

    Args:
        input_string: The string whose characters should be reversed.

    Returns:
        The input string with its characters in reverse order.
    """
    return input_string[::-1]
|
178 |
+
|
179 |
# Define the real agent
class BasicAgent:
    """Gemini-backed smolagents ``CodeAgent`` wrapper.

    Builds a ``CodeAgent`` driven by Gemini (through LiteLLM) with web-search
    and utility tools, and exposes a ``__call__`` interface that prepends a
    strict answer-formatting instruction to each question and sleeps between
    requests to respect API rate limits.
    """

    def __init__(self):
        print("Improved BasicAgent initialized with Gemini and enhanced tools.")

        # Load Gemini through LiteLLM (ensure GEMINI_API_TOKEN is set)
        # NOTE(review): the commented-out client setup above reads
        # GEMINI_API_KEY, but this reads GEMINI_API_TOKEN — confirm which
        # environment variable the deployment actually sets.
        self.model = LiteLLMModel(
            model_id="gemini/gemini-2.0-flash-lite",
            api_key=os.getenv("GEMINI_API_TOKEN"),
        )

        # Setup CodeAgent with tools
        self.agent = CodeAgent(
            tools=[
                DuckDuckGoSearchTool(),
                WikipediaSearchTool(),
                PythonInterpreterTool(),
                FinalAnswerTool(),
                reverse_string,
            ],
            model=self.model,
            max_steps=10,
            add_base_tools=True,
            # NOTE(review): "*" authorizes every import in the Python
            # interpreter tool, which makes the explicit "pandas" entry
            # redundant and effectively disables the import sandbox —
            # confirm this is intentional.
            additional_authorized_imports=["pandas", "*"]
        )

    def __call__(self, question: str) -> str:
        """Main callable for processing questions with rate limiting."""
        print(f"Running agent for task: {question[:50]}...")

        # Prompt with strict output formatting
        system_instruction = (
            "I will ask you a question. Report your thoughts step by step. "
            "Finish your answer only with the final answer. In the final answer don't write explanations. "
            "The answer should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. "
            "Avoid units, abbreviations, or articles unless specified. "
            "Pay attention to each sentence in the question and verify the answer against every part. "
            "Try searching more sources if initial results are insufficient.\nQUESTION: "
        )

        prompt = system_instruction + question
        try:
            result = self.agent.run(prompt)
        except Exception as e:
            # Broad catch is deliberate: errors are returned as the answer
            # string so one failing task does not abort a batch run.
            result = f"Error: {str(e)}"

        # Rate limiting to avoid 429 errors or API limits
        print("Waiting 3 seconds to respect rate limits...")
        time.sleep(3)

        return result
|
230 |
|
231 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
232 |
"""
|