Update app.py
app.py
CHANGED
@@ -10,52 +10,30 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+from transformers import pipeline
+
 class BasicAgent:
     def __init__(self):
-        print("FLAN-T5 Agent initialized.")
-
-        self.
-
-        "
-        "
-
+        print("FLAN-T5-SMALL Local Agent initialized.")
+
+        self.pipeline = pipeline(
+            "text2text-generation",
+            model="google/flan-t5-small",
+            tokenizer="google/flan-t5-small",
+            device=-1
+        )
 
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
 
-        if not self.api_token:
-            return "Error: Missing Hugging Face API token."
-
-        prompt = f"Answer this question:\n{question.strip()}"
-
-        payload = {
-            "inputs": prompt,
-            "parameters": {
-                "max_new_tokens": 256,
-                "temperature": 0.5
-            }
-        }
-
         try:
-
-
-
-
-            # Extract output
-            if isinstance(output, list) and "generated_text" in output[0]:
-                return output[0]["generated_text"]
-            elif isinstance(output, dict) and "generated_text" in output:
-                return output["generated_text"]
-            elif isinstance(output, list) and "output" in output[0]:
-                return output[0]["output"]
-            else:
-                print(f"Unexpected response: {output}")
-                return "Model returned an unexpected format."
-
+            prompt = f"Answer the following question:\n{question.strip()}"
+            result = self.pipeline(prompt, max_new_tokens=128, temperature=0.5)
+            answer = result[0]["generated_text"]
+            return answer.strip()
         except Exception as e:
-            print(f"❌
-            return f"❌
-
+            print(f"❌ Error during model inference: {e}")
+            return f"❌ Model Error: {str(e)}"
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
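With this change the agent no longer calls the hosted Inference API (the self.api_token check and the request payload are removed); instead it loads google/flan-t5-small in-process through the transformers pipeline and runs it on CPU (device=-1). A minimal standalone sketch of what the new __call__ path does is shown below; the script form, the torch/transformers install assumption, and the sample question are illustrative assumptions, not part of app.py.

# Standalone sketch of the new local-inference path (assumes `transformers`
# and a torch backend are installed; the sample question is made up).
from transformers import pipeline

pipe = pipeline(
    "text2text-generation",
    model="google/flan-t5-small",
    tokenizer="google/flan-t5-small",
    device=-1,  # -1 = CPU, matching the Space's hardware
)

question = "What is the capital of France?"
prompt = f"Answer the following question:\n{question.strip()}"

# max_new_tokens and temperature mirror the values used in the updated __call__
result = pipe(prompt, max_new_tokens=128, temperature=0.5)
print(result[0]["generated_text"].strip())

The trade-off versus the removed requests-based version is that the Space now downloads the model weights at startup and performs its own CPU inference, but it no longer needs a Hugging Face API token or any handling of the hosted API's response formats.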