Upload 7 files
- README.md +14 -14
- agent.py +148 -283
- questions.json +122 -0
- requirements.txt +17 -1
- system_prompt.txt +5 -0
README.md
CHANGED
@@ -1,15 +1,15 @@
----
-title: Template Final Assignment
-emoji: 🕵🏻♂️
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 5.25.2
-app_file: app.py
-pinned: false
-hf_oauth: true
-# optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
-hf_oauth_expiration_minutes: 480
----
-
+---
+title: Template Final Assignment
+emoji: 🕵🏻♂️
+colorFrom: indigo
+colorTo: indigo
+sdk: gradio
+sdk_version: 5.25.2
+app_file: app.py
+pinned: false
+hf_oauth: true
+# optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
+hf_oauth_expiration_minutes: 480
+---
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
agent.py
CHANGED
@@ -1,62 +1,81 @@
-import tempfile
-from urllib.parse import urlparse
-from langchain.schema import HumanMessage, AIMessage, SystemMessage
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import AnyMessage, SystemMessage
-from langchain_core.tools import tool
-from langchain_community.document_loaders import WikipediaLoader
-from langchain_community.document_loaders import ArxivLoader
-
-from langchain_community.tools.tavily_search import TavilySearchResults
-from langchain.tools.retriever import create_retriever_tool
-
-from langgraph.graph.message import add_messages
-from langgraph.graph import START, StateGraph, MessagesState, END
-from langgraph.prebuilt import tools_condition, ToolNode
-
 import os
 from dotenv import load_dotenv
-from …
-from …
+from langgraph.graph import START, StateGraph, MessagesState
+from langgraph.prebuilt import tools_condition
+from langgraph.prebuilt import ToolNode
+from langchain_google_genai import ChatGoogleGenerativeAI
 
 from langchain_huggingface import (
     ChatHuggingFace,
     HuggingFaceEndpoint,
     HuggingFaceEmbeddings,
 )
-
-from …
-import …
-from …
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain_community.document_loaders import WikipediaLoader
+from langchain_community.document_loaders import ArxivLoader
+from langchain_community.vectorstores import SupabaseVectorStore
+from langchain_core.messages import SystemMessage, HumanMessage
+from langchain_core.tools import tool
+from langchain.tools.retriever import create_retriever_tool
+from supabase.client import Client, create_client
 
 load_dotenv()
 
 
 @tool
-def …
+def multiply(a: int, b: int) -> int:
+    """Multiply two numbers.
+    Args:
+        a: first int
+        b: second int
     """
-… (2 removed lines not shown in the source view)
+    return a * b
+
+
+@tool
+def add(a: int, b: int) -> int:
+    """Add two numbers.
 
     Args:
-… (2 removed lines not shown in the source view)
+        a: first int
+        b: second int
+    """
+    return a + b
 
-… (2 removed lines not shown in the source view)
+
+@tool
+def subtract(a: int, b: int) -> int:
+    """Subtract two numbers.
+
+    Args:
+        a: first int
+        b: second int
     """
-… (6 removed lines not shown in the source view)
+    return a - b
+
+
+@tool
+def divide(a: int, b: int) -> int:
+    """Divide two numbers.
 
-… (3 removed lines not shown in the source view)
+    Args:
+        a: first int
+        b: second int
+    """
+    if b == 0:
+        raise ValueError("Cannot divide by zero.")
+    return a / b
+
+
+@tool
+def modulus(a: int, b: int) -> int:
+    """Get the modulus of two numbers.
 
-… (1 removed line not shown in the source view)
+    Args:
+        a: first int
+        b: second int
+    """
+    return a % b
 
 
 @tool
@@ -107,265 +126,111 @@ def arvix_search(query: str) -> str:
     return {"arvix_results": formatted_search_docs}
 
 
-… (3 removed lines not shown in the source view)
-    Download a file from a URL and save it to a temporary location.
-
-    Args:
-        url: The URL to download from
-        filename: Optional filename, will generate one based on URL if not provided
-
-    Returns:
-        Path to the downloaded file
-    """
-    try:
-        # Parse URL to get filename if not provided
-        if not filename:
-            path = urlparse(url).path
-            filename = os.path.basename(path)
-            if not filename:
-                # Generate a random name if we couldn't extract one
-                import uuid
-
-                filename = f"downloaded_{uuid.uuid4().hex[:8]}"
-
-        # Create temporary file
-        temp_dir = tempfile.gettempdir()
-        filepath = os.path.join(temp_dir, filename)
-
-        # Download the file
-        response = requests.get(url, stream=True)
-        response.raise_for_status()
+# load the system prompt from the file
+with open("system_prompt.txt", "r", encoding="utf-8") as f:
+    system_prompt = f.read()
 
-… (2 removed lines not shown in the source view)
-        for chunk in response.iter_content(chunk_size=8192):
-            f.write(chunk)
+# System message
+sys_msg = SystemMessage(content=system_prompt)
 
-… (15 removed lines not shown in the source view)
-""
-… (2 removed lines not shown in the source view)
-        import pytesseract
-        from PIL import Image
-
-        # Open the image
-        image = Image.open(image_path)
-
-        # Extract text
-        text = pytesseract.image_to_string(image)
-
-        return f"Extracted text from image:\n\n{text}"
-    except ImportError:
-        return "Error: pytesseract is not installed. Please install it with 'pip install pytesseract' and ensure Tesseract OCR is installed on your system."
-    except Exception as e:
-        return f"Error extracting text from image: {str(e)}"
-
-
-@tool
-def analyze_csv_file(file_path: str, query: str) -> str:
-    """
-    Analyze a CSV file using pandas and answer a question about it.
-
-    Args:
-        file_path: Path to the CSV file
-        query: Question about the data
-
-    Returns:
-        Analysis result or error message
-    """
-    try:
-        import pandas as pd
-
-        # Read the CSV file
-        df = pd.read_csv(file_path)
-
-        # Run various analyses based on the query
-        result = f"CSV file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
-        result += f"Columns: {', '.join(df.columns)}\n\n"
-
-        # Add summary statistics
-        result += "Summary statistics:\n"
-        result += str(df.describe())
-
-        return result
-    except ImportError:
-        return "Error: pandas is not installed. Please install it with 'pip install pandas'."
-    except Exception as e:
-        return f"Error analyzing CSV file: {str(e)}"
-
-
-@tool
-def analyze_excel_file(file_path: str, query: str) -> str:
-    """
-    Analyze an Excel file using pandas and answer a question about it.
-
-    Args:
-        file_path: Path to the Excel file
-        query: Question about the data
-
-    Returns:
-        Analysis result or error message
-    """
-    try:
-        import pandas as pd
+# build a retriever
+embeddings = HuggingFaceEmbeddings(
+    model_name="sentence-transformers/all-mpnet-base-v2"
+)  # dim=768
+supabase: Client = create_client(
+    os.environ.get("SUPABASE_URL"), os.environ.get("SUPABASE_SERVICE_KEY")
+)
+vector_store = SupabaseVectorStore(
+    client=supabase,
+    embedding=embeddings,
+    table_name="documents",
+    query_name="match_documents_langchain",
+)
+create_retriever_tool = create_retriever_tool(
+    retriever=vector_store.as_retriever(),
+    name="Question Search",
+    description="A tool to retrieve similar questions from a vector store.",
+)
 
-        # Read the Excel file
-        df = pd.read_excel(file_path)
 
-        # Run various analyses based on the query
-        result = (
-            f"Excel file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
-        )
-        result += f"Columns: {', '.join(df.columns)}\n\n"
-
-        # Add summary statistics
-        result += "Summary statistics:\n"
-        result += str(df.describe())
-
-        return result
-    except ImportError:
-        return "Error: pandas and openpyxl are not installed. Please install them with 'pip install pandas openpyxl'."
-    except Exception as e:
-        return f"Error analyzing Excel file: {str(e)}"
-
-
-# Initialize the DuckDuckGo search tool
-search_tool = DuckDuckGoSearchResults()
-
-
-# # Load LLM model
-# llm = ChatOpenAI(
-#     model="gpt-4o",
-#     base_url="https://models.inference.ai.azure.com",
-#     api_key=os.environ["GITHUB_TOKEN"],
-#     temperature=0.2,
-#     max_tokens=4096,
-# )
-# llm = ChatHuggingFace(
-#     llm=HuggingFaceEndpoint(
-#         repo_id="Qwen/Qwen3-4B",
-#         # repo_id="meta-llama/Llama-3-70B-Instruct",
-#         temperature=0,
-#         huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
-#     ),
-#     verbose=True,
-# )
-llm = ChatGoogleGenerativeAI(
-    model="gemini-2.0-flash-exp", google_api_key=os.environ["GOOGLE_API_KEY"]
-)
 tools = [
-… (5 removed lines not shown in the source view)
-    web_search,
+    multiply,
+    add,
+    subtract,
+    divide,
+    modulus,
     wiki_search,
+    web_search,
     arvix_search,
 ]
-# Bind the tools to the LLM
-model_with_tools = llm.bind_tools(tools)
-tool_node = ToolNode(tools)
-
-
-class AgentState(TypedDict):
-    """State of the agent."""
-
-    input_file: Optional[str]
-    messages: Annotated[list[AnyMessage], add_messages]
-
-
-def build_agent_workflow():
-    """Build the agent workflow."""
-
-    def call_model(state: AgentState):
-        print("State:", state["messages"])
-        question = state["messages"][-1].content
-        context = f"""
-        You are a helpful assistant tasked with answering questions using a set of tools.
-        """
-        # System message
-        if state.get("input_file"):
-            try:
-                with open(state.get("input_file"), "r") as f:
-                    file_content = f.read()
-                    print("File content:", file_content)
-
-                # Determine file type from extension
-                file_ext = os.path.splitext(state.get("input_file"))[1].lower()
-                context = f"""
-                Question: {question}
-                This question has an associated file. Here is the file content:
-                ```{file_ext}
-                {file_content}
-                ```
-                Analyze the file content above to answer the question."""
-            except Exception as file_e:
-                context = f""" Question: {state["message"]}
-                This question has an associated file at path: {state.get("input_file")}
-                However, there was an error reading the file: {file_e}
-                You can still try to answer the question based on the information provided.
-                """
-
-        if question.startswith(".") or ".rewsna eht sa" in question:
-            print("Reversed text detected.")
-            print(state.get("messages")[::-1])
-
-            context = f"""
-            This question appears to be in reversed text. your task to reverse the sentence. Here's the reversed example:
-            .rewsna eht sa "tfel" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI
-            and the answer is:
-            "If you understand this sentence, write the opposite of the word "left" as the answer."
-
-            Now rewrite in to proper formate the {question}. Remember to format your answer exactly as requested.
-            """
-        system_prompt = SystemMessage(
-            f"""{context}
-            When answering, provide ONLY the precise answer requested.
-            Do not include explanations, steps, reasoning, or additional text.
-            Be direct and specific. GAIA benchmark requires exact matching answers.
-            For example, if asked "What is the capital of France?", respond simply with "Paris".
-            """
-        )
-        return {
-            "messages": [model_with_tools.invoke([system_prompt] + state["messages"])],
-            # "input_file": state["input_file"],
-        }
 
-    # Define the state graph
-    workflow = StateGraph(MessagesState)
-    workflow.add_node("agent", call_model)
-    workflow.add_node("tools", tool_node)
 
-… (5 removed lines not shown in the source view)
+# Build graph function
+def build_agent_workflow(provider: str = "groq"):
+    """Build the graph"""
+    # Load environment variables from .env file
+    if provider == "google":
+        # Google Gemini
+        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
+    elif provider == "github":
+        from langchain_openai import ChatOpenAI
+
+        llm = ChatOpenAI(
+            model="gpt-4o",
+            base_url="https://models.inference.ai.azure.com",
+            api_key=os.environ["GITHUB_TOKEN"],
+            temperature=0.2,
+            max_tokens=4096,
+        )
+    elif provider == "huggingface":
+        # TODO: Add huggingface endpoint
+        llm = ChatHuggingFace(
+            llm=HuggingFaceEndpoint(
+                url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
+                temperature=0,
+            ),
+        )
+    else:
+        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
+    # Bind tools to LLM
+    llm_with_tools = llm.bind_tools(tools)
+
+    # Node
+    def assistant(state: MessagesState):
+        """Assistant node"""
+        return {"messages": [llm_with_tools.invoke(state["messages"])]}
+
+    def retriever(state: MessagesState):
+        """Retriever node"""
+        similar_question = vector_store.similarity_search(state["messages"][0].content)
+        example_msg = HumanMessage(
+            content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
+        )
+        return {"messages": [sys_msg] + state["messages"] + [example_msg]}
+
+    builder = StateGraph(MessagesState)
+    builder.add_node("retriever", retriever)
+    builder.add_node("assistant", assistant)
+    builder.add_node("tools", ToolNode(tools))
+    builder.add_edge(START, "retriever")
+    builder.add_edge("retriever", "assistant")
+    builder.add_conditional_edges(
+        "assistant",
+        tools_condition,
+    )
+    builder.add_edge("tools", "assistant")
+
+    # Compile graph
+    return builder.compile()
 
 
+# test
 if __name__ == "__main__":
-    question = …
+    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
     # Build the graph
-    graph = build_agent_workflow()
+    graph = build_agent_workflow(provider="github")
     # Run the graph
     messages = [HumanMessage(content=question)]
-    messages = graph.invoke({"messages": messages…
+    messages = graph.invoke({"messages": messages})
     for m in messages["messages"]:
         m.pretty_print()
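For context, a minimal usage sketch of the `build_agent_workflow` graph added above. The graph construction, `HumanMessage` input, and `messages` result shape come from the diff; the import path and the choice of `provider="google"` are assumptions for illustration (note that the `"groq"` default in the committed signature has no matching branch and would raise `ValueError`).

# Minimal usage sketch (assumes agent.py from this commit is importable and the
# relevant API keys / Supabase credentials are present in the environment).
from langchain_core.messages import HumanMessage

from agent import build_agent_workflow

# "google" is chosen purely for illustration; the committed code accepts
# "google", "github", or "huggingface".
graph = build_agent_workflow(provider="google")

result = graph.invoke(
    {"messages": [HumanMessage(content="What is the capital of France?")]}
)
print(result["messages"][-1].content)  # final assistant reply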
questions.json
ADDED
@@ -0,0 +1,122 @@
+[
+  {
+    "task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
+    "question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
+    "question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "2d83110e-a098-4ebb-9987-066c06fa42d0",
+    "question": ".rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "cca530fc-4052-43b2-b130-b30968d8aa44",
+    "question": "Review the chess position provided in the image. It is black's turn. Provide the correct next move for black which guarantees a win. Please provide your response in algebraic notation.",
+    "Level": "1",
+    "file_name": "cca530fc-4052-43b2-b130-b30968d8aa44.png"
+  },
+  {
+    "task_id": "4fc2f1ae-8625-45b5-ab34-ad4433bc21f8",
+    "question": "Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "6f37996b-2ac7-44b0-8e68-6d28256631b4",
+    "question": "Given this table defining * on the set S = {a, b, c, d, e}\n\n|*|a|b|c|d|e|\n|---|---|---|---|---|---|\n|a|a|b|c|b|d|\n|b|b|c|a|e|c|\n|c|c|a|b|b|a|\n|d|b|e|b|e|d|\n|e|d|b|a|d|c|\n\nprovide the subset of S involved in any possible counter-examples that prove * is not commutative. Provide your answer as a comma separated list of the elements in the set in alphabetical order.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "9d191bce-651d-4746-be2d-7ef8ecadb9c2",
+    "question": "Examine the video at https://www.youtube.com/watch?v=1htKBjuUWec.\n\nWhat does Teal'c say in response to the question \"Isn't that hot?\"",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "cabe07ed-9eca-40ea-8ead-410ef5e83f91",
+    "question": "What is the surname of the equine veterinarian mentioned in 1.E Exercises from the chemistry materials licensed by Marisa Alviar-Agnew & Henry Agnew under the CK-12 license in LibreText's Introductory Chemistry materials as compiled 08/21/2023?",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "3cef3a44-215e-4aed-8e3b-b1e3f08063b7",
+    "question": "I'm making a grocery list for my mom, but she's a professor of botany and she's a real stickler when it comes to categorizing things. I need to add different foods to different categories on the grocery list, but if I make a mistake, she won't buy anything inserted in the wrong category. Here's the list I have so far:\n\nmilk, eggs, flour, whole bean coffee, Oreos, sweet potatoes, fresh basil, plums, green beans, rice, corn, bell pepper, whole allspice, acorns, broccoli, celery, zucchini, lettuce, peanuts\n\nI need to make headings for the fruits and vegetables. Could you please create a list of just the vegetables from my list? If you could do that, then I can figure out how to categorize the rest of the list into the appropriate categories. But remember that my mom is a real stickler, so make sure that no botanical fruits end up on the vegetable list, or she won't get them when she's at the store. Please alphabetize the list of vegetables, and place each item in a comma separated list.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3",
+    "question": "Hi, I'm making a pie but I could use some help with my shopping list. I have everything I need for the crust, but I'm not sure about the filling. I got the recipe from my friend Aditi, but she left it as a voice memo and the speaker on my phone is buzzing so I can't quite make out what she's saying. Could you please listen to the recipe and list all of the ingredients that my friend described? I only want the ingredients for the filling, as I have everything I need to make my favorite pie crust. I've attached the recipe as Strawberry pie.mp3.\n\nIn your response, please only list the ingredients, not any measurements. So if the recipe calls for \"a pinch of salt\" or \"two cups of ripe strawberries\" the ingredients on the list would be \"salt\" and \"ripe strawberries\".\n\nPlease format your response as a comma separated list of ingredients. Also, please alphabetize the ingredients.",
+    "Level": "1",
+    "file_name": "99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3"
+  },
+  {
+    "task_id": "305ac316-eef6-4446-960a-92d80d542f82",
+    "question": "Who did the actor who played Ray in the Polish-language version of Everybody Loves Raymond play in Magda M.? Give only the first name.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "f918266a-b3e0-4914-865d-4faa564f1aef",
+    "question": "What is the final numeric output from the attached Python code?",
+    "Level": "1",
+    "file_name": "f918266a-b3e0-4914-865d-4faa564f1aef.py"
+  },
+  {
+    "task_id": "3f57289b-8c60-48be-bd80-01f8099ca449",
+    "question": "How many at bats did the Yankee with the most walks in the 1977 regular season have that same season?",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "1f975693-876d-457b-a649-393859e79bf3",
+    "question": "Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(\n\nCould you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.",
+    "Level": "1",
+    "file_name": "1f975693-876d-457b-a649-393859e79bf3.mp3"
+  },
+  {
+    "task_id": "840bfca7-4f7b-481a-8794-c560c340185d",
+    "question": "On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "bda648d7-d618-4883-88f4-3466eabd860e",
+    "question": "Where were the Vietnamese specimens described by Kuznetzov in Nedoshivina's 2010 paper eventually deposited? Just give me the city name without abbreviations.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "cf106601-ab4f-4af9-b045-5295fe67b37d",
+    "question": "What country had the least number of athletes at the 1928 Summer Olympics? If there's a tie for a number of athletes, return the first in alphabetical order. Give the IOC country code as your answer.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "a0c07678-e491-4bbc-8f0b-07405144218f",
+    "question": "Who are the pitchers with the number before and after Taishō Tamai's number as of July 2023? Give them to me in the form Pitcher Before, Pitcher After, use their last names only, in Roman characters.",
+    "Level": "1",
+    "file_name": ""
+  },
+  {
+    "task_id": "7bd855d8-463d-4ed5-93ca-5fe35145f733",
+    "question": "The attached Excel file contains the sales of menu items for a local fast-food chain. What were the total sales that the chain made from food (not including drinks)? Express your answer in USD with two decimal places.",
+    "Level": "1",
+    "file_name": "7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx"
+  },
+  {
+    "task_id": "5a0c1adf-205e-4841-a666-7c3ef95def9d",
+    "question": "What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?",
+    "Level": "1",
+    "file_name": ""
+  }
+]
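A hedged sketch of feeding these questions through the agent from this commit; the `task_id`, `question`, and `file_name` keys come from the file above, while the provider choice and the output format are illustrative assumptions, not part of the committed code.

import json

from langchain_core.messages import HumanMessage

from agent import build_agent_workflow  # agent.py from this commit

# Provider is an illustrative choice; see the agent.py section above.
graph = build_agent_workflow(provider="google")

with open("questions.json", "r", encoding="utf-8") as f:
    questions = json.load(f)

answers = []
for item in questions:
    # Questions with an attached file (file_name != "") would need the file
    # fetched separately; this sketch only runs the text-only questions.
    if item["file_name"]:
        continue
    result = graph.invoke({"messages": [HumanMessage(content=item["question"])]})
    answers.append({"task_id": item["task_id"], "answer": result["messages"][-1].content})

print(json.dumps(answers, indent=2))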
requirements.txt
CHANGED
@@ -22,6 +22,7 @@ colorama==0.4.6
 coloredlogs==15.0.1
 dataclasses-json==0.6.7
 Deprecated==1.2.18
+deprecation==2.1.0
 dirtyjson==1.0.8
 distro==1.9.0
 duckduckgo_search==8.0.1
@@ -38,6 +39,7 @@ google-ai-generativelanguage==0.6.18
 google-api-core==2.24.2
 google-auth==2.39.0
 googleapis-common-protos==1.70.0
+gotrue==2.12.0
 gradio==5.27.1
 gradio_client==1.9.1
 greenlet==3.2.1
@@ -46,15 +48,19 @@ groovy==0.1.2
 grpcio==1.71.0
 grpcio-status==1.71.0
 h11==0.14.0
+h2==4.2.0
+hpack==4.1.0
 httpcore==1.0.8
 httptools==0.6.4
 httpx==0.28.1
 httpx-sse==0.4.0
 huggingface-hub==0.30.2
 humanfriendly==10.0
+hyperframe==6.1.0
 idna==3.10
 importlib_metadata==8.6.1
 importlib_resources==6.5.2
+iniconfig==2.1.0
 Jinja2==3.1.6
 jiter==0.9.0
 joblib==1.4.2
@@ -111,6 +117,8 @@ packaging==24.2
 pandas==2.2.3
 pillow==11.2.1
 platformdirs==4.3.7
+pluggy==1.5.0
+postgrest==1.0.1
 posthog==4.0.0
 primp==0.15.0
 propcache==0.3.1
@@ -123,16 +131,20 @@ pydantic-settings==2.8.1
 pydantic_core==2.33.1
 pydub==0.25.1
 Pygments==2.19.1
+PyJWT==2.10.1
 PyMuPDF==1.25.5
 PyPika==0.48.9
 pyproject_hooks==1.2.0
 pyreadline3==3.5.4
 pytesseract==0.3.13
+pytest==8.3.5
+pytest-mock==3.14.0
 python-dateutil==2.9.0.post0
 python-dotenv==1.1.0
 python-multipart==0.0.20
 pytz==2025.2
 PyYAML==6.0.2
+realtime==2.4.3
 referencing==0.36.2
 regex==2024.11.6
 requests==2.32.3
@@ -156,6 +168,10 @@ soupsieve==2.7
 SQLAlchemy==2.0.40
 sse-starlette==2.2.1
 starlette==0.45.3
+storage3==0.11.3
+StrEnum==0.4.15
+supabase==2.15.1
+supafunc==0.9.4
 sympy==1.14.0
 tenacity==9.1.2
 threadpoolctl==3.6.0
@@ -174,7 +190,7 @@ urllib3==2.4.0
 uvicorn==0.34.1
 watchfiles==1.0.5
 websocket-client==1.8.0
-websockets==
+websockets==14.2
 wikipedia==1.4.0
 wrapt==1.17.2
 xxhash==3.5.0
system_prompt.txt
ADDED
@@ -0,0 +1,5 @@
+You are a helpful assistant tasked with answering questions using a set of tools.
+Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
+FINAL ANSWER: [YOUR FINAL ANSWER].
+YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
+Your answer should only start with "FINAL ANSWER: ", then follows with the answer.