from smolagents import CodeAgent, DuckDuckGoSearchTool, LiteLLMModel, load_tool, tool
import datetime
import time
import os
import requests
import pytz
import yaml
from duckduckgo_search import DDGS
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"

@tool
def browsing_tool_fetch_content(url: str, query_context: str) -> str:
    """
    Fetches the textual content of a web page so the agent can read more than the search snippet.
    Uses 'requests' and 'BeautifulSoup'; a dedicated browsing/scraping API could be swapped in instead.
    The query_context is provided in case the browsing tool can use it for better extraction.
    Args:
        url: the URL to fetch the content from.
        query_context: the search query or topic related to the URL.
    """
    print(f"[Browsing Tool] Attempting to fetch content for URL: {url} (context: '{query_context}')")
    # Fetch and parse the page, handling potential errors (network issues, 404s, etc.).
    try:
        from bs4 import BeautifulSoup
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # Raise an exception for HTTP errors
        soup = BeautifulSoup(response.content, 'html.parser')
        # Extract text from <p> tags first - a simple heuristic that might need refinement
        paragraphs = soup.find_all('p')
        fetched_text = "\n".join(p.get_text() for p in paragraphs)
        if not fetched_text:
            # Fallback to the whole document text if <p> tags are not the primary content holders
            fetched_text = soup.get_text(separator='\n', strip=True)
        return fetched_text
    except Exception as e:
        return f"Error fetching content from {url}: {str(e)}"

@tool
def search_duckduckgo(topic: str, max_results: int = 3) -> list:
    """
    Searches DuckDuckGo for a given topic, retrieves search results,
    and then attempts to fetch the full content of each result URL.
    Args:
        topic: The topic to search for.
        max_results: The maximum number of search results to process.
    Returns:
        A list of dictionaries, where each dictionary represents a search result
        and contains:
            - 'title': The title of the search result.
            - 'href': The URL of the search result.
            - 'original_snippet': The original snippet from DuckDuckGo.
            - 'full_content': The fetched full content from the URL (or an error message).
    """
    print(f"Searching DuckDuckGo for: {topic} (max_results: {max_results})")
    detailed_results_list = []
    try:
        # Get initial search results from DuckDuckGo
        initial_results = DDGS().text(topic, max_results=max_results)
        if not initial_results:
            print("No initial results found from DuckDuckGo.")
            return []
        print(f"Found {len(initial_results)} initial results. Now fetching full content...")
        for result in initial_results:
            title = result.get('title', 'N/A')
            href = result.get('href', None)
            original_snippet = result.get('body', 'N/A')
            print(f"\nProcessing result: {title}")
            print(f"  URL: {href}")
            full_content = "N/A"  # Default if the URL is missing or fetching fails
            if href:
                # Use the browsing tool to fetch full content,
                # passing the original 'topic' as query_context.
                full_content = browsing_tool_fetch_content(url=href, query_context=topic)
            else:
                print("  No URL found for this result, cannot fetch full content.")
                full_content = "No URL provided in search result."
            detailed_results_list.append({
                'title': title,
                'href': href,
                'original_snippet': original_snippet,
                'full_content': full_content
            })
            print(f"  Full content (or error): {full_content[:200]}...")  # Preview of fetched content
    except Exception as e:
        print(f"An error occurred during the search or content fetching process: {str(e)}")
        # Fall through and return whatever was processed so far.
    return detailed_results_list

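# Illustrative (commented-out) usage showing the structure this tool returns; actual titles,
# URLs and content depend on live DuckDuckGo results:
# results = search_duckduckgo(topic="smolagents CodeAgent tutorial", max_results=2)
# for r in results:
#     print(r['title'], r['href'])
#     print(r['original_snippet'])
#     print(r['full_content'][:200], "...")
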
# Earlier, simpler version kept for reference:
# @tool
# def search_duckduckgo(topic: str) -> list:
#     """
#     Searches DuckDuckGo for a given topic and returns a list of results.
#     Args:
#         topic: The topic to search for.
#     Returns:
#         A list of dictionaries, where each dictionary represents a search result
#         and contains keys like 'title', 'href', and 'body'.
#     """
#     results = DDGS().text(topic, max_results=3)
#     return results

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

final_answer = FinalAnswerTool()

# If the agent does not answer, the model may be overloaded; use another model or the following
# Hugging Face Endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

# Do not hardcode API keys in source code: set GOOGLE_API_KEY as a Space secret / environment variable.
gemini_api_key = os.environ.get("GOOGLE_API_KEY")
try:
    # LiteLLM uses the 'gemini/' prefix for Google AI Studio models
    gemini_model = LiteLLMModel(
        model_id="gemini/gemini-1.5-flash-latest",
        api_key=gemini_api_key,
        temperature=0.5,
        max_tokens=2096,
        custom_role_conversions=None
    )
    print("Successfully initialized LiteLLMModel for Gemini 1.5 Flash.")
except Exception as e:
    print(f"Failed to initialize LiteLLMModel: {e}")
    gemini_model = None

# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='google/gemma-2b-it',  # it is possible that this model may be overloaded
#     custom_role_conversions=None,
# )
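# A minimal fallback sketch in case the Gemini initialization above fails (assumes HfApiModel
# is still exported by your smolagents version; the model_id below is only an example):
# if gemini_model is None:
#     from smolagents import HfApiModel
#     gemini_model = HfApiModel(
#         max_tokens=2096,
#         temperature=0.5,
#         model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
#         custom_role_conversions=None,
#     )
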
search_tool = DuckDuckGoSearchTool()

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
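# prompts.yaml supplies the agent's prompt templates as a YAML mapping (the course template has a
# 'system_prompt' entry plus further sections). A hedged, minimal sketch of its shape:
#
# system_prompt: |
#   You are an expert assistant who solves tasks step by step using code blobs...
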
agent = CodeAgent(
    model=gemini_model,
    tools=[final_answer, get_current_time_in_timezone, search_duckduckgo, browsing_tool_fetch_content],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

GradioUI(agent).launch()