from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
from smolagents import LiteLLMModel
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
from gradio_client import Client
from PIL import Image
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet.
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"
# -- my tool
# @tool
def get_image(img_description: str) -> Image.Image:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that generates an image from a textual description.
    Args:
        img_description: a string containing the description of the image the user wants to get
    """
    # Alternative: call the Space directly via gradio_client
    # client = Client("agents-course/text-to-image")
    # result = client.predict(
    #     param_0="Hello!!",
    #     api_name="/predict"
    # )
    return image_generation_tool.forward(img_description)
# -- my tool end
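
# Hypothetical usage sketch (assumes the agents-course/text-to-image Space is
# up and reachable); the returned PIL image can be saved locally:
# generated = get_image("a watercolor fox in a snowy forest")
# generated.save("fox.png")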
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
# If the agent does not answer, the model is overloaded; use another model, or use the
# following Hugging Face Endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
#     custom_role_conversions=None,
# )
model = LiteLLMModel(
    # model_id="ollama_chat/qwen2:7b",  # Or try other Ollama-supported models
    # model_id="ollama/qwen2:7b",  # Or try other Ollama-supported models
    model_id="ollama_chat/gemma3:1b",
    api_base="http://127.0.0.1:11434",  # Default Ollama local server
    # api_base="http://0.0.0.0:11434",
    # num_ctx=8192,
)
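
# Assumption: a local Ollama server is running and the model has already been
# pulled (e.g. `ollama pull gemma3:1b`); otherwise LiteLLM cannot connect.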
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
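
# To let the agent call the tools defined above, add them to the tools list, e.g.
# tools=[final_answer, get_current_time_in_timezone, my_custom_tool]
# (keep final_answer; get_image would need its @tool decorator re-enabled first).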
GradioUI(agent).launch()