import os
import random
import re
import string
import textwrap
from typing import Optional

import nltk
import requests
import spacy
from dotenv import load_dotenv
from huggingface_hub import HfApi, login
from mcp import StdioServerParameters
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    FinalAnswerTool,
    HfApiModel,
    InferenceClientModel,
    LiteLLMModel,
    OpenAIServerModel,
    PythonInterpreterTool,
    SpeechToTextTool,
    ToolCallingAgent,
    ToolCollection,
    TransformersModel,
    VisitWebpageTool,
    WikipediaSearchTool,
    load_tool,
    tool,
)

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


@tool
def download_file(task_id: str) -> str:
    """
    Returns the file path of the downloaded file.

    Args:
        task_id: the ID of the task to download the file for.
    """
    response = requests.get(f"{DEFAULT_API_URL}/files/{task_id}")
    if response.status_code == 200:
        # Write the raw bytes to /tmp so other tools can open the file from disk.
        file_path = f"/tmp/{task_id}"
        with open(file_path, "wb") as file:
            file.write(response.content)
        return file_path
    else:
        raise Exception(f"Failed to download file: {response.status_code}")


@tool
def get_file_content_as_text(task_id: str) -> str:
    """
    Returns the content of the file as text.

    Args:
        task_id: the ID of the task to get the file content for.
    """
    response = requests.get(f"{DEFAULT_API_URL}/files/{task_id}")
    if response.status_code == 200:
        return response.text
    else:
        raise Exception(f"Failed to get file content: {response.status_code}")

def load_hf_model(modelName: str):
    """
    Loads a model from the Hugging Face Hub
    :param modelName: Name of the model
    :return: model
    """
    load_dotenv()
    model = HfApiModel(model_id=modelName)
    return model


def load_ollama_model(modelName: str):
    """
    Connects to a locally running Ollama server and returns the requested model
    :param modelName: Name of the model
    :return: model (served via Ollama's OpenAI-compatible API)
    """
    # Local OpenAI-compatible servers ignore the API key; a placeholder avoids the client
    # complaining when no OPENAI_API_KEY is set in the environment.
    model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:11434/v1", api_key="not-needed")
    return model


def load_lmStudio_model(modelName: str):
    """
    Connects to a locally running LM Studio server and returns the requested model
    :param modelName: Name of the model
    :return: model, accessible through LM Studio's OpenAI-compatible API
    """
    model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:1234/v1", api_key="not-needed")
    return model


def load_gemini_model(model_name: str):
    """
    Loads the Gemini model via LiteLLM
    :param model_name: Name of the model
    :return: model, or None if loading fails
    """
    try:
        # Never print the API key itself; only report whether it is configured.
        if not os.getenv("GEMINI_API_KEY"):
            print("Warning: GEMINI_API_KEY is not set.")
        model = LiteLLMModel(model_id=f"gemini/{model_name}",
                             api_key=os.getenv("GEMINI_API_KEY"))
        return model
    except Exception as e:
        print("Error loading Gemini model:", e)
        return None
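
# The model loaders above expect credentials in the environment (typically loaded from a .env file):
# GEMINI_API_KEY for Gemini, and a Hugging Face token (usually HF_TOKEN) for the Hugging Face Hub model.
# The local Ollama / LM Studio servers need no real API key.
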
def get_agent(model_name: str, model_type: str) -> Optional[CodeAgent]:
    """
    Builds the top-level agent for the given model
    :param model_name: Name of the model to load
    :param model_type: One of "hugging face", "Ollama", "Gemini" or "LMStudio"
    :return: the configured CodeAgent, or None if the model type is not supported
    """
    match model_type:
        case "hugging face":
            model = load_hf_model(model_name)
        case "Ollama":
            model = load_ollama_model(model_name)
        case "Gemini":
            model = load_gemini_model(model_name)
        case "LMStudio":
            model = load_lmStudio_model(model_name)
        case _:
            print("Model type not supported.")
            return None

    web_search_tool = DuckDuckGoSearchTool()
    final_answer_tool = FinalAnswerTool()
    visit_webpage_tool = VisitWebpageTool()

    variation_agent = CodeAgent(
        model=model,
        tools=[PythonInterpreterTool()],
        name="variation_agent",
        description="Gets the user question and checks whether it makes sense at all; if not, it tries to modify the text, e.g. by reversing it. Provide the content / the question as the 'task' argument. "
        "The agent can write professional Python code, focused on modifying texts. "
        "It has access to the following libraries: re, string, random, textwrap, nltk and spacy. "
        "The goal is to find out whether a user question is a trick, in which case the content may need to be modified.",
        additional_authorized_imports=[
            "re",
            "string",
            "random",
            "textwrap",
            "nltk",
            "spacy",
        ],
    )
    variation_agent.system_prompt = (
        "You are a text variation agent. You can write professional Python code, focused on modifying texts. "
        "You can use the following libraries: re, string, random, textwrap, nltk and spacy. "
        "Your goal is to find out whether a user question is a trick, in which case the content may need to be modified."
    )

    code_agent = CodeAgent(
        name="code_agent",
        description="Can generate code and run it. It provides the possibility to download additional files if needed.",
        model=model,
        tools=[download_file, PythonInterpreterTool(), get_file_content_as_text],
        additional_authorized_imports=[
            "geopandas",
            "plotly",
            "shapely",
            "json",
            "pandas",
            "numpy",
        ],
        verbosity_level=2,
        max_steps=5,
    )

    # Reuse the FinalAnswerTool created above and override its description with the required answer format.
    final_answer_tool.description = "You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."

    tool_agent = CodeAgent(
        model=model,
        tools=[web_search_tool, visit_webpage_tool, WikipediaSearchTool(), final_answer_tool],
        verbosity_level=2,
        max_steps=15,
        managed_agents=[code_agent, variation_agent],
        planning_interval=5,
    )

    return tool_agent

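
# Example usage (illustrative sketch): build an agent against a locally running Ollama server and ask it a
# question. The model name "qwen2.5-coder:32b" and the question are placeholders; substitute whatever backend,
# model and task you actually use.
if __name__ == "__main__":
    agent = get_agent(model_name="qwen2.5-coder:32b", model_type="Ollama")
    if agent is not None:
        answer = agent.run("What is the capital of France?")
        print("FINAL:", answer)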