Spaces:
Running
Running
Update agent.py
Browse files
agent.py
CHANGED
@@ -54,7 +54,6 @@ from langchain.agents import initialize_agent, Tool, AgentType
|
|
54 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
55 |
from huggingface_hub import login
|
56 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig
|
57 |
-
|
58 |
from langchain_huggingface import HuggingFaceEndpoint
|
59 |
|
60 |
load_dotenv()
|
@@ -329,22 +328,35 @@ tools = [tool_map[name] for name in enabled_tool_names]
|
|
329 |
# Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
|
330 |
# -------------------------------
|
331 |
# Here we assume the tasks are already fetched from a URL or file.
|
332 |
-
|
333 |
-
|
334 |
-
|
335 |
-
|
336 |
-
|
337 |
-
|
338 |
-
|
339 |
-
|
340 |
-
|
341 |
-
|
342 |
-
|
343 |
-
|
344 |
-
|
345 |
-
|
346 |
-
|
347 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
348 |
|
349 |
# -------------------------------
|
350 |
# Step 3: Create Documents from Each JSON Object
|
|
|
54 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
55 |
from huggingface_hub import login
|
56 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig
|
|
|
57 |
from langchain_huggingface import HuggingFaceEndpoint
|
58 |
|
59 |
load_dotenv()
|
|
|
328 |
# Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
|
329 |
# -------------------------------
|
330 |
# Here we assume the tasks are already fetched from a URL or file.
|
331 |
# -------------------------------
# Load the task list as JSON from the Space repo and wrap each task in a
# LangChain Document (question text as page content, full task dict as
# metadata).
# NOTE: the original URL used "/blob/main/", which serves an HTML viewer page
# on huggingface.co; "/resolve/main/" returns the raw file content, which is
# what response.json() needs.
json_url = "https://huggingface.co/spaces/wt002/Final_Assignment_Project/resolve/main/questions.json"

# Bind these before the try-block so later steps that reference `docs` or
# `tasks` degrade gracefully (empty lists) instead of raising NameError when
# the fetch or parse below fails.
tasks = []
docs = []

try:
    # Fetch the JSON content from the URL; a timeout keeps the app from
    # hanging indefinitely if huggingface.co is unreachable.
    response = requests.get(json_url, timeout=30)
    response.raise_for_status()  # Raise error if the request failed

    tasks = response.json()  # Parse JSON content
    print(f"✅ Loaded {len(tasks)} tasks from URL")

    # Convert each task to a LangChain Document
    for task in tasks:
        question = task.get("question", "").strip()
        if not question:
            print(f"⚠️ Skipping task with empty question: {task}")
            continue

        # Tag each task with a fresh UUID so documents are individually
        # addressable downstream (e.g. in a vector store).
        task["id"] = str(uuid.uuid4())
        docs.append(Document(page_content=question, metadata=task))

except requests.RequestException as e:
    print(f"❌ Failed to fetch JSON from URL: {e}")
except ValueError as e:
    print(f"❌ Invalid JSON format: {e}")
360 |
|
361 |
# -------------------------------
|
362 |
# Step 3: Create Documents from Each JSON Object
|