from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool, LiteLLMModel, TransformersModel
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

# Below is an example of a custom tool: it searches the web for songs similar to a
# given song or artist. Amaze us with your creativity!
@tool
def find_similar_songs(query: str) -> str:
    """Given a song title or artist name, searches the web for "songs like [query]" or "if you like [query]" and returns the top 5 links with titles and snippets.
    Args:
        query: the song or artist that the user would like to find songs or artists similar to
    """
    search_terms = f"songs like {query} or if you like {query}"
    results = ddg(search_terms)
    if not results:
        return f"No results found for '{query}'."
    # Current smolagents releases return an already formatted markdown string from
    # DuckDuckGoSearchTool; the dict handling below is kept as a fallback for list results.
    if isinstance(results, str):
        return f"### Similar songs to **{query}**\n\n{results.strip()}"
    md = f"### Similar songs to **{query}**\n\n"
    for item in results[:5]:
        # Result entries typically expose 'title', 'href', and 'body' fields
        title = item.get("title") or item.get("heading", "Untitled")
        link = item.get("href") or item.get("link", "")
        snippet = item.get("body") or item.get("snippet", "")
        md += f"- **[{title}]({link})**\n"
        if snippet:
            md += f"  > {snippet.strip()}\n"
        md += "\n"
    return md.strip()
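
# Quick check (assumption, not part of the original template): the decorated tool is callable
# like a plain function, so its markdown output can be inspected before wiring it into the agent.
#   print(find_similar_songs("Radiohead"))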

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

final_answer = FinalAnswerTool()
ddg = DuckDuckGoSearchTool()

# If the agent does not answer, the model is overloaded; use another model or the following
# Hugging Face Endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
#     custom_role_conversions=None,
# )

# model = LiteLLMModel(
#     model_id="ollama_chat/qwen2.5:7b",  # Ollama model identifier
#     api_base="http://127.0.0.1:11434",  # default Ollama HTTP port
#     num_ctx=8192,
#     temperature=0.5,
# )

model = TransformersModel(
    model_id="TheBloke/vicuna-7B-1.1-HF",  # or any other Hugging Face repo
    device="cuda",                         # or "cpu"
    max_new_tokens=1024,                   # how many tokens to generate
    temperature=0.5,                       # same as before
    # quantize="4bit"                      # optional, if supported
)
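
# Note (assumption about the runtime, not from the original file): TransformersModel loads the
# checkpoint locally, so the Space needs enough RAM/VRAM for a 7B model; set device="cpu"
# (or fall back to one of the hosted options above) if no GPU is available.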

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
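
# Note (assumption): image_generation_tool is loaded but not passed to the agent below; to let
# the agent call it, add it to the tools list, e.g.
#   tools=[final_answer, get_current_time_in_timezone, ddg, image_generation_tool]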

with open("prompts.yaml", "r") as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[final_answer, get_current_time_in_timezone, ddg],  ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
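
# Optional smoke test (assumption, not part of the original template): uncomment to run a single
# query from the terminal before launching the Gradio UI.
#   print(agent.run("What is the current local time in Europe/Paris?"))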

GradioUI(agent).launch()