Update agent.py
Retriever removed.
agent.py CHANGED
@@ -7,10 +7,7 @@ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
 from langgraph.graph import StateGraph, END
 import json # Necessary for parsing JSON output from the LLM

-#
-# If your tools require initialization (e.g., a retriever), ensure it's done here.
-# For this example, I assume get_retriever() and tools are available and ready.
-from retriever import get_retriever
+# tools is still needed
 from tools import tools # Ensure 'tools' is a list of LangChain Tool objects

 # Load the system prompt from file
@@ -31,8 +28,8 @@ class AgentState(dict):
     """
     pass

-# 1. Initialize retriever (used
-retriever = get_retriever()
+# 1. REMOVED: Initialize retriever (not used by current tools/agent flow)
+# retriever = get_retriever() # This line is removed

 # 2. Initialize the language model (using Hugging Face Inference Endpoint)
 # Ensure that this URL is correct and your HF token is available in the environment.
@@ -289,62 +286,13 @@ class BasicAgent:
         return final_state.get("output", "No answer could be generated.")

 if __name__ == "__main__":
-    # Example Usage (for local testing
-    # This part assumes you have mock versions of retriever.py and tools.py
-    # or actual implementations that can run locally.
+    # Example Usage (for local testing)
     print("Testing BasicAgent locally...")

     # For local testing, ensure you have:
-    # 1. A mock or real `
-    # 2. A
-    # 3.
-    # 4. Your Hugging Face token set as an environment variable (HF_TOKEN).
-    # import os
-    # os.environ["HF_TOKEN"] = "YOUR_ACTUAL_HF_TOKEN" # Replace with your actual token for local testing
-
-    # Example content for a dummy system_prompt.txt
-    # with open("system_prompt.txt", "w") as f:
-    # f.write("""You are an expert AI assistant designed to answer user questions using a set of available tools.
-    # Your goal is to provide accurate and concise answers.
-
-    # **Available Tools:**
-    # {{tool_descriptions}}
-
-    # **Instructions for your output:**
-    # Your response MUST be a single, valid JSON object. Do NOT output any other text or explanation outside this JSON.
-
-    # 1. **If you need to use a tool to answer the question:**
-    # Your JSON object MUST have the following structure:
-    # ```json
-    # {
-    # "action": "tool",
-    # "tool_name": "[name_of_the_tool]",
-    # "tool_args": {
-    # "[argument1_name]": "[argument1_value]",
-    # "[argument2_name]": "[argument2_value]"
-    # }
-    # }
-    # ```
-    # * `tool_name`: Must be one of the exact names listed in "Available Tools".
-    # * `tool_args`: This is a JSON object where keys are the exact argument names the tool expects, and values are the corresponding arguments extracted from the user's question or reasoned by you. Ensure argument types match (e.g., numbers for math tools, strings for search queries).
-
-    # 2. **If you can answer the question directly using your general knowledge without needing a tool:**
-    # Your JSON object MUST have the following structure:
-    # ```json
-    # {
-    # "action": "final_answer",
-    # "answer": "[your final answer here]"
-    # }
-    # ```
-    # * `answer`: Your concise final answer. When providing numbers, do not include commas, currency symbols, or units unless explicitly requested. For strings, write digits in plain text unless otherwise specified.
-
-    # **Strict Rules to Follow:**
-    # * Always output a single, valid JSON object.
-    # * Do not include any conversational text, apologies, or explanations outside the JSON.
-    # * Do not guess answers if you are unsure; use a tool if you lack sufficient information.
-    # * When a tool result is provided to you, incorporate it into your reasoning to formulate the final answer.
-    # """)
-
+    # 1. A mock or real `tools.py` that provides `tools` (a list of LangChain Tool objects).
+    # 2. A `system_prompt.txt` file (create it if not present, with example content below).
+    # 3. Your Hugging Face token set as an environment variable (HF_TOKEN).

     try:
         agent = BasicAgent()
@@ -352,20 +300,23 @@ if __name__ == "__main__":
         response1 = agent("What is the capital of France?")
         print(f"Agent Response: {response1}")

-        print("\n--- Test 2: Question requiring a tool (e.g.,
-        #
-
-        response2 = agent("Who is the current president of the United States?")
+        print("\n--- Test 2: Question requiring a tool (e.g., web_search) ---")
+        # This will test if the agent can correctly call web_search
+        response2 = agent("What is the current population of the United States?")
         print(f"Agent Response: {response2}")

-        print("\n--- Test 3:
-        response3 = agent("
+        print("\n--- Test 3: Math question (e.g., multiply) ---")
+        response3 = agent("What is 15 multiplied by 23?")
         print(f"Agent Response: {response3}")

-        print("\n--- Test 4: Question
-        response4 = agent("What is the
+        print("\n--- Test 4: Question requiring Python code execution ---")
+        response4 = agent("What is the result of the python code: `sum([x**2 for x in range(1, 5)])`?")
         print(f"Agent Response: {response4}")

+        print("\n--- Test 5: Question with no clear tool, but needs a general answer ---")
+        response5 = agent("What is the meaning of life?")
+        print(f"Agent Response: {response5}")
+
     except Exception as e:
         print(f"\nError during local testing: {e}")
-        print("Please ensure your HF_TOKEN is set, and '
+        print("Please ensure your HF_TOKEN is set, and 'tools.py' is correctly implemented.")
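The new comments also ask for a `system_prompt.txt` file, while the inline example prompt is dropped by this commit. As a reference, a cut-down version of that removed prompt could be recreated like this (paraphrased from the removed comment block; the file actually used by the Space may be longer and worded differently):

```python
# Sketch only: writes a minimal system_prompt.txt condensed from the prompt
# text removed in this commit; adjust to match the real prompt if you have it.
minimal_prompt = """You are an expert AI assistant designed to answer user questions using a set of available tools.
Your goal is to provide accurate and concise answers.

**Available Tools:**
{{tool_descriptions}}

Your response MUST be a single, valid JSON object with no other text.
To call a tool: {"action": "tool", "tool_name": "<tool name>", "tool_args": {"<arg>": "<value>"}}
To answer directly: {"action": "final_answer", "answer": "<your final answer>"}
"""

with open("system_prompt.txt", "w", encoding="utf-8") as f:
    f.write(minimal_prompt)
```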