""" | |
BasicAgent - Simple fallback agent with LLM integration and rule-based answers. | |
This agent provides basic question answering capabilities using LLM API calls | |
with fallback to rule-based responses when API access is unavailable. | |
""" | |
import time

import requests

from config import (
    LLAMA_API_URL, HF_API_TOKEN, HEADERS, MAX_RETRIES, RETRY_DELAY
)
from utils.text_processing import clean_llm_response, extract_final_answer
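
# For context, config.py is expected to supply values along these lines
# (illustrative only; the real endpoint, token handling, and retry policy
# live in config.py):
#   LLAMA_API_URL = "https://api-inference.huggingface.co/models/<model-id>"
#   HF_API_TOKEN = os.getenv("HF_API_TOKEN")
#   HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
#   MAX_RETRIES = 3
#   RETRY_DELAY = 2  # seconds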


class BasicAgent:
    """
    Simple agent with LLM integration and rule-based fallbacks.

    Features:
    - Direct LLM API integration
    - Response cleaning and answer extraction
    - Rule-based fallback answers
    - Simple prompt formatting
    """

    def __init__(self):
        print("BasicAgent initialized.")
        # Set up LLM API access
        self.hf_api_url = LLAMA_API_URL
        self.headers = HEADERS
        # Set up caching for responses
        self.cache = {}
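
    # Note: self.cache is a plain in-process dict keyed on the exact prompt
    # string; it is unbounded and not persisted across runs.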
    def query_llm(self, prompt: str) -> str:
        """Send a prompt to the LLM API and return the response."""
        # Check the cache first
        if prompt in self.cache:
            print("Using cached response")
            return self.cache[prompt]

        # Fall back to the rule-based approach if no API token is configured
        if not HF_API_TOKEN:
            return self.rule_based_answer(prompt)

        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 300,
                "temperature": 0.5,
                "top_p": 0.8,
                "do_sample": True
            }
        }
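
        # With do_sample=False most text-generation backends decode greedily
        # and ignore temperature/top_p, so sampling is enabled explicitly;
        # the moderate values above keep answers focused but non-deterministic.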
        for attempt in range(MAX_RETRIES):
            try:
                response = requests.post(
                    self.hf_api_url,
                    headers=self.headers,
                    json=payload,
                    timeout=30
                )
                response.raise_for_status()
                result = response.json()

                # Extract the generated text from the response
                if isinstance(result, list) and len(result) > 0:
                    generated_text = result[0].get("generated_text", "")
                    # Clean up the response to get just the answer
                    cleaned = self.clean_response(generated_text, prompt)
                    # Cache the response
                    self.cache[prompt] = cleaned
                    return cleaned
                return "I couldn't generate a proper response."
            except Exception as e:
                print(f"Attempt {attempt + 1}/{MAX_RETRIES} failed: {e}")
                if attempt < MAX_RETRIES - 1:
                    time.sleep(RETRY_DELAY)
                else:
                    # Fall back to the rule-based method on failure
                    return self.rule_based_answer(prompt)
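
    # Note: a successful text-generation call to the HF Inference API usually
    # returns a list containing a single dict, e.g.
    #   [{"generated_text": "<prompt followed by the model's completion>"}]
    # (the exact shape can vary by model and endpoint), which is why
    # query_llm indexes result[0] and passes the original prompt along so
    # clean_response can strip it from the output.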
    def clean_response(self, response: str, prompt: str) -> str:
        """Clean up the LLM response to extract the answer."""
        return clean_llm_response(response, prompt)

    def rule_based_answer(self, question: str) -> str:
        """Fallback method using rule-based answers for common question types."""
        question_lower = question.lower()

        # Simple pattern matching for common question types
        if "what is" in question_lower or "define" in question_lower:
            if "agent" in question_lower:
                return "An agent is an autonomous entity that observes and acts upon an environment using sensors and actuators, usually to achieve specific goals."
            if "gaia" in question_lower:
                return "GAIA (General AI Assistants) is a benchmark for evaluating AI assistants on real-world questions that require reasoning, multimodal handling, and tool use."
            if "llm" in question_lower or "large language model" in question_lower:
                return "A Large Language Model (LLM) is a neural network trained on vast amounts of text data to understand and generate human language."
            if "rag" in question_lower or "retrieval" in question_lower:
                return "RAG (Retrieval-Augmented Generation) combines retrieval of relevant information with the generation capabilities of language models."
        if "how to" in question_lower:
            return "To accomplish this task, you should first understand the requirements, then implement a solution step by step, and finally test your implementation."
        if "example" in question_lower:
            return "Here's an example implementation that demonstrates the concept in a practical manner."
        if "evaluate" in question_lower or "criteria" in question_lower:
            return "Evaluation criteria for agents typically include accuracy, relevance, factual correctness, conciseness, ability to follow instructions, and transparency in reasoning."

        # More specific fallback answers
        if "tools" in question_lower:
            return "Tools for AI agents include web search, content extraction, API connections, and various knowledge retrieval mechanisms."
        if "chain" in question_lower:
            return "Chain-of-thought reasoning allows AI agents to break down complex problems into sequential steps, improving accuracy and transparency."
        if "purpose" in question_lower or "goal" in question_lower:
            return "The purpose of AI agents is to assist users by answering questions, performing tasks, and providing helpful information while maintaining ethical standards."

        # Default response for truly unmatched questions
        return "This question relates to AI agent capabilities. To provide a more precise answer, I would need additional information or context about the specific aspect of AI agents you're interested in."

    def format_prompt(self, question: str) -> str:
        """Format the question into a proper prompt for the LLM."""
        return f"""You are an intelligent AI assistant. Please answer the following question accurately and concisely:

Question: {question}

Answer:"""

    def __call__(self, question: str) -> str:
        """Main execution method for the BasicAgent."""
        print(f"BasicAgent received question: {question[:100]}...")
        try:
            # Format the question as a prompt
            prompt = self.format_prompt(question)
            # Query the LLM
            answer = self.query_llm(prompt)
            # Extract the final answer
            clean_answer = extract_final_answer(answer)
            print(f"BasicAgent returning answer: {clean_answer[:100]}...")
            return clean_answer
        except Exception as e:
            print(f"Error in BasicAgent: {e}")
            # Fall back to the rule-based method if anything goes wrong
            fallback_answer = self.rule_based_answer(question)
            print(f"BasicAgent returning fallback answer: {fallback_answer[:100]}...")
            return fallback_answer
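

if __name__ == "__main__":
    # Minimal usage sketch: with HF_API_TOKEN configured, the agent queries
    # the LLM API; without it, answers come from the rule-based fallback.
    agent = BasicAgent()
    print(agent("What is an agent?"))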