|
|
|
|
|
import os |
|
|
from smolagents import MultiStepAgent, CodeAgent, OpenAIServerModel, FinalAnswerTool, PythonInterpreterTool |
|
|
|
|
|
from tools.smol_tools import * |
|
|
|
|
|
import prompts.custom_prompts as custom_prompts |
|
|
|
|
|
from openai import OpenAI |
|
|
|
|
|
from models import PatchedOpenAIServerModel |
|
|
|
|
|
from dotenv import load_dotenv |
|
|
# Load environment variables from a local .env file so the API keys
# below can be read with os.getenv.
load_dotenv()

# Key for the direct OpenAI API (gpt-* and o4-mini models below).
openai_api_key = os.getenv("OPENAI_API_KEY")

# Key for OpenRouter (DeepSeek and Llama models below).
openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
|
|
|
|
|
|
|
|
# --- Model handles -------------------------------------------------------
# Direct-OpenAI models use the default api_base; OpenRouter-hosted models
# point api_base at the OpenRouter endpoint and use the OpenRouter key.

# GPT-4o via the OpenAI API.
gpt_4o_model = OpenAIServerModel(
    model_id="gpt-4o",
    api_key=openai_api_key,
)

# GPT-4.1 via the OpenAI API.
gpt_41_model = OpenAIServerModel(
    model_id="gpt-4.1",
    api_key=openai_api_key,
)

# GPT-4.1-mini via the OpenAI API; the workhorse model for most agents below.
gpt_41_mini_model = OpenAIServerModel(
    model_id="gpt-4.1-mini",
    api_key=openai_api_key,
)

# o4-mini via the project-local patched wrapper (PatchedOpenAIServerModel);
# presumably the patch adapts smolagents to o4-mini's API quirks — see models.py.
o4_mini_model = PatchedOpenAIServerModel(
    model_id="o4-mini",
    api_key=openai_api_key,
)

# DeepSeek V3 (free tier) served through OpenRouter.
deepseek_v3_model = OpenAIServerModel(
    model_id="deepseek/deepseek-chat-v3-0324:free",
    api_key=openrouter_api_key,
    api_base="https://openrouter.ai/api/v1/"
)

# DeepSeek R1 (free tier) served through OpenRouter.
deepseek_r1_model = OpenAIServerModel(
    model_id="deepseek/deepseek-r1-0528:free",
    api_key=openrouter_api_key,
    api_base="https://openrouter.ai/api/v1/",
)

# Llama 3.2 11B vision-instruct (free tier) served through OpenRouter.
llma_3_2_vision_model = OpenAIServerModel(
    model_id="meta-llama/llama-3.2-11b-vision-instruct:free",
    api_key=openrouter_api_key,
    api_base="https://openrouter.ai/api/v1/",
)

# Shared verbosity for every agent below; 0 keeps agent logging quiet.
VERBOSITY_LEVEL = 0
|
|
|
|
|
def extract_final_answer(result):
    """Extract the final answer from an agent run result.

    Parameters
    ----------
    result : Any
        A value returned by an agent run. Only a dict carrying a
        "final_answer" key is recognized; any other shape yields "".

    Returns
    -------
    str
        The stripped string form of the final answer, or "" when no
        usable answer is present.
    """
    if isinstance(result, dict) and "final_answer" in result:
        answer = result["final_answer"]
        # BUG FIX: an explicit None used to be stringified to the literal
        # "None"; treat it as "no answer" instead.
        if answer is None:
            return ""
        return str(answer).strip()
    return ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Toolbox for the "hacker" agent: web research plus basic arithmetic.
# All non-smolagents tools come from tools.smol_tools (star-imported above).
HACKER_TOOLS = [
    WikipediaSearchTool(),
    VisitWebpageTool(),
    Multiply(),
    Divide(),
    Add(),
    Subtract(),
    Modulus(),
    FinalAnswerTool()
]

# Toolbox for the "scout" agent: media understanding (audio, images,
# YouTube transcripts).
SCOUT_TOOLS = [
    SpeechToTextTool(),
    FinalAnswerTool(),
    AnalyzeImage(),
    YouTubeTranscript(),
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Extra modules each CodeAgent's Python sandbox may import, on top of the
# smolagents defaults (passed as additional_authorized_imports below).
AUTHORIZED_IMPORTS = [
    "pandas",
    "numpy",
    "bs4",
    "requests",
    "markdownify"
]
|
|
|
|
|
|
|
|
# --- Agents --------------------------------------------------------------

# "hacker": web research + arithmetic specialist, managed by COMMANDER.
HACKER = CodeAgent(
    name="hacker",
    description = custom_prompts.HACKER_DESCRIPTION,
    model=gpt_41_mini_model,
    tools=HACKER_TOOLS,
    verbosity_level=VERBOSITY_LEVEL,
    add_base_tools=True,
    additional_authorized_imports=AUTHORIZED_IMPORTS,
    max_steps=6
)

# "scout": media-understanding specialist (audio/image/YouTube).
SCOUT = CodeAgent(
    name="scout",
    description = custom_prompts.SCOUT_DESCRIPTION,
    model=gpt_41_mini_model,
    tools=SCOUT_TOOLS,
    verbosity_level=VERBOSITY_LEVEL,
    add_base_tools=True,
    additional_authorized_imports=AUTHORIZED_IMPORTS,
    max_steps=6
)

# "thinker": pure-reasoning agent on the (patched) o4-mini model; only
# needs FinalAnswerTool since it answers rather than gathers.
THINKER = CodeAgent(
    name="thinker",
    description = custom_prompts.THINKER_DESCRIPTION,
    model=o4_mini_model,
    tools=[FinalAnswerTool()],
    verbosity_level=VERBOSITY_LEVEL,
    add_base_tools=True,
    additional_authorized_imports=AUTHORIZED_IMPORTS,
    max_steps=6
)

# "commander": orchestrator that plans every 2 steps and delegates to the
# three managed agents above; uses custom prompt templates and a higher
# step budget than its workers.
COMMANDER = CodeAgent(
    name="commander",
    description = custom_prompts.COMMANDER_DESCRIPTION,
    model=gpt_41_mini_model,
    add_base_tools=True,
    tools=[PythonInterpreterTool(), FinalAnswerTool()],
    managed_agents=[HACKER, SCOUT, THINKER],
    planning_interval=2,
    verbosity_level=VERBOSITY_LEVEL,
    prompt_templates=custom_prompts.commander_prompt_templates,
    max_steps=12,
)
|
|
|
|
|
class TaskForce:
    """Callable facade over the commander agent.

    Wraps an incoming question with the standard output prompt, delegates
    the run to the commander, and converts any failure into an error
    string so callers never see an exception.
    """

    def __init__(self):
        # Module-level orchestrator agent.
        self.commander = COMMANDER

    def __call__(self, question: str) -> str:
        try:
            # Frame the question with the shared output instructions.
            prompt = "".join(
                ["\nTHE QUESTION:\n", question, "\n", custom_prompts.output_prompt]
            )
            return self.commander.run(prompt)
        except Exception as exc:
            # Boundary handler: report the failure instead of raising.
            message = f"An error occurred while processing the question: {exc}"
            print(message)
            return message
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Smoke test: send one simple question through the full pipeline.
    task_force = TaskForce()
    question = "What is the capital of France?"
    # BUG FIX: this previously called HACKER.run(question) directly,
    # leaving the freshly built TaskForce unused and bypassing the
    # commander orchestration and prompt wrapping.
    answer = task_force(question)
    print(f"Answer: {answer}")