|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import logging, os |
|
|
|
|
|
# Put CrewAI into testing mode and disable its built-in tracing. These must be
# set before the `crewai` imports below so the library reads the flags on load.
os.environ["CREWAI_TESTING"] = "TRUE"


os.environ["CREWAI_TRACING_ENABLED"] = "0"
|
|
|
|
|
from agents.models.llms import ( |
|
|
LLM_CREW_PLANNING, |
|
|
LLM_MANAGER, |
|
|
LLM_AGENT |
|
|
) |
|
|
from agents.tools.ai_tools import AITools |
|
|
from agents.tools.deterministic_tools import DeterministicTools |
|
|
from agents.tools.mcp_tools import MCPTools |
|
|
from crewai import Agent, Crew, Task, Process |
|
|
from crewai.agents.agent_builder.base_agent import BaseAgent |
|
|
from crewai.events.listeners.tracing.first_time_trace_handler import FirstTimeTraceHandler |
|
|
from crewai.project import CrewBase, agent, crew, task |
|
|
from google import genai |
|
|
from google.genai import types |
|
|
from phoenix.otel import register |
|
|
from typing import List |
|
|
from utils.utils import read_file_json, is_ext |
|
|
|
|
|
def _noop(self, *args, **kwargs):
    """Accept any arguments and do nothing.

    Used below to replace CrewAI trace-display methods so no tracing
    prompts or links are printed.
    """
    return None
|
|
|
|
|
# Silence CrewAI's first-time tracing UX: replace the handler's display
# methods with no-ops so no trace links or tracing messages are printed
# (tracing itself is already disabled via CREWAI_TRACING_ENABLED above).
FirstTimeTraceHandler._display_ephemeral_trace_link = _noop


FirstTimeTraceHandler._show_local_trace_message = _noop


FirstTimeTraceHandler._show_tracing_declined_message = _noop
|
|
|
|
|
|
|
|
|
|
|
# --- Crew-level settings (used in GAIACrew.crew) ---
PLANNING_CREW = True   # plan tasks up front with LLM_CREW_PLANNING


MEMORY_CREW = False    # crew memory disabled


VERBOSE_CREW = False   # crew-level verbose logging off


# --- Manager agent settings (used in GAIACrew.manager_agent) ---
MAX_ITER_MANAGER = 15               # iteration cap for the manager


REASONING_MANAGER = True            # manager reasons before acting


MAX_REASONING_ATTEMPTS_MANAGER = 3  # cap on manager reasoning attempts


VERBOSE_MANAGER = False             # manager verbose logging off


# --- Specialist (worker) agent settings, shared by all @agent methods ---
MAX_ITER_AGENT = 15               # iteration cap per worker agent


REASONING_AGENT = True            # workers reason before acting


MAX_REASONING_ATTEMPTS_AGENT = 2  # cap on worker reasoning attempts


VERBOSE_AGENT = False             # worker verbose logging off
|
|
|
|
|
os.environ["CHROMA_OPENAI_API_KEY"] = os.getenv("CHROMA_OPENAI_API_KEY") |
|
|
|
|
|
|
|
|
|
|
|
# --- Arize Phoenix tracing setup ---
# os.getenv returns None when unset; assigning None into os.environ raises an
# opaque TypeError, so validate up front and fail with a clear message.
_phoenix_api_key = os.getenv("PHOENIX_API_KEY")
if _phoenix_api_key is None:
    raise RuntimeError("PHOENIX_API_KEY environment variable is not set")

os.environ["PHOENIX_API_KEY"] = _phoenix_api_key
os.environ["PHOENIX_COLLECTOR_ENDPOINT"] = "https://app.phoenix.arize.com/s/bstraehle/v1/traces"
os.environ["PHOENIX_CLIENT_HEADERS"] = f"api-key={_phoenix_api_key}"

# Register the OpenTelemetry tracer provider that exports spans to Phoenix.
tracer_provider = register(
    project_name="gaia",
    protocol="http/protobuf",
    auto_instrument=True,   # auto-instrument supported libraries
    batch=True,             # batch span export to cut network overhead
    set_global_tracer_provider=True
)

# Quiet the instrumentation libraries' own loggers.
logging.getLogger("openinference").setLevel(logging.CRITICAL)
logging.getLogger("opentelemetry").setLevel(logging.CRITICAL)
|
|
|
|
|
def _print_agent_config(agent_name: str, reasoning: bool, max_reasoning_attempts: int, max_iter: int, verbose: bool, llm: str, tools: "str | None" = None):
    """Print one agent's configuration to stdout.

    Args:
        agent_name: Human-readable agent name used as the line prefix.
        reasoning: Whether the agent reasons/plans before acting.
        max_reasoning_attempts: Cap on reasoning attempts.
        max_iter: Cap on agent iterations.
        verbose: Whether the agent logs verbosely.
        llm: Identifier of the LLM backing the agent.
        tools: Optional comma-separated tool names; the tools line is
            skipped when falsy (the annotation was `str` before, which
            contradicted the `None` default).
    """
    print("")
    print(f"π€ {agent_name} reasoning: {reasoning}")
    print(f"π€ {agent_name} reasoning max attempts: {max_reasoning_attempts}")
    print(f"π€ {agent_name} max iterations: {max_iter}")
    print(f"π€ {agent_name} verbose: {verbose}")
    print(f"π§ {agent_name} llm: {llm}")
    if tools:
        print(f"π οΈ {agent_name} tools: {tools}")
|
|
|
|
|
@CrewBase
class GAIACrew():
    """CrewAI crew for GAIA benchmark questions.

    A set of specialist agents (web search/browsing, chess, image/audio/video/
    YouTube/document analysis, arithmetic, code generation/execution) plus a
    delegating manager agent, executed as a sequential process with planning.
    Agent and task configs are loaded by @CrewBase from the project's
    agents/tasks YAML into `self.agents_config` / `self.tasks_config`.
    """

    agents: List[BaseAgent]  # populated by @CrewBase from the @agent methods
    tasks: List[Task]        # populated by @CrewBase from the @task methods

    @agent
    def web_search_agent(self) -> Agent:
        """Specialist agent that answers questions via web search."""
        _print_agent_config("Web search agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.web_search_tool")

        return Agent(
            config=self.agents_config["web_search_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.web_search_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def web_browser_agent(self) -> Agent:
        """Specialist agent that browses web pages."""
        _print_agent_config("Web browser agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.web_browser_tool")

        return Agent(
            config=self.agents_config["web_browser_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.web_browser_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def chess_analysis_agent(self) -> Agent:
        """Specialist agent for chess positions: image -> FEN -> best move -> notation."""
        _print_agent_config("Chess analysis agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.img_to_fen_tool, MCPTools.best_move_tool, AITools.algebraic_notation_tool")

        return Agent(
            config=self.agents_config["chess_analysis_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.img_to_fen_tool, MCPTools.best_move_tool, AITools.algebraic_notation_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def image_analysis_agent(self) -> Agent:
        """Specialist agent that analyzes images."""
        _print_agent_config("Image analysis agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.image_analysis_tool")

        return Agent(
            config=self.agents_config["image_analysis_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.image_analysis_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def audio_analysis_agent(self) -> Agent:
        """Specialist agent that analyzes audio files."""
        _print_agent_config("Audio analysis agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.audio_analysis_tool")

        return Agent(
            config=self.agents_config["audio_analysis_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.audio_analysis_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def video_analysis_agent(self) -> Agent:
        """Specialist agent that analyzes video files."""
        _print_agent_config("Video analysis agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.video_analysis_tool")

        return Agent(
            config=self.agents_config["video_analysis_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.video_analysis_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def youtube_analysis_agent(self) -> Agent:
        """Specialist agent that analyzes YouTube videos."""
        _print_agent_config("Youtube analysis agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.youtube_analysis_tool")

        return Agent(
            config=self.agents_config["youtube_analysis_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.youtube_analysis_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def document_analysis_agent(self) -> Agent:
        """Specialist agent that analyzes documents."""
        _print_agent_config("Document analysis agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.document_analysis_tool")

        return Agent(
            config=self.agents_config["document_analysis_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.document_analysis_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def arithmetic_agent(self) -> Agent:
        """Specialist agent for deterministic arithmetic operations."""
        _print_agent_config("Arithmetic agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "DeterministicTools.add_tool, DeterministicTools.subtract_tool, DeterministicTools.multiply_tool, DeterministicTools.divide_tool, DeterministicTools.modulus_tool")

        return Agent(
            # BUGFIX: was self.agents_config["document_analysis_agent"] — a
            # copy-paste error that gave this agent the document-analysis
            # role/goal/backstory instead of its own config.
            config=self.agents_config["arithmetic_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[DeterministicTools.add_tool,
                   DeterministicTools.subtract_tool,
                   DeterministicTools.multiply_tool,
                   DeterministicTools.divide_tool,
                   DeterministicTools.modulus_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def code_generation_and_execution_agent(self) -> Agent:
        """Specialist agent that generates and executes code."""
        _print_agent_config("Code generation and execution agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.code_generation_and_execution_tool")

        return Agent(
            config=self.agents_config["code_generation_and_execution_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.code_generation_and_execution_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def code_execution_agent(self) -> Agent:
        """Specialist agent that executes existing code."""
        _print_agent_config("Code execution agent", REASONING_AGENT, MAX_REASONING_ATTEMPTS_AGENT, MAX_ITER_AGENT, VERBOSE_AGENT, LLM_AGENT,
                            "AITools.code_execution_tool")

        return Agent(
            config=self.agents_config["code_execution_agent"],
            allow_delegation=False,
            llm=LLM_AGENT,
            max_iter=MAX_ITER_AGENT,
            tools=[AITools.code_execution_tool],
            reasoning=REASONING_AGENT,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_AGENT,
            verbose=VERBOSE_AGENT
        )

    @agent
    def manager_agent(self) -> Agent:
        """Manager agent: the only agent allowed to delegate work to specialists."""
        _print_agent_config("Manager", REASONING_MANAGER, MAX_REASONING_ATTEMPTS_MANAGER, MAX_ITER_MANAGER, VERBOSE_MANAGER, LLM_MANAGER)

        return Agent(
            config=self.agents_config["manager_agent"],
            allow_delegation=True,  # delegates to the specialist agents
            llm=LLM_MANAGER,
            max_iter=MAX_ITER_MANAGER,
            reasoning=REASONING_MANAGER,
            max_reasoning_attempts=MAX_REASONING_ATTEMPTS_MANAGER,
            verbose=VERBOSE_MANAGER
        )

    @task
    def manager_task(self) -> Task:
        """The single crew task, configured from tasks YAML."""
        return Task(
            config=self.tasks_config["manager_task"]
        )

    @crew
    def crew(self) -> Crew:
        """Assemble the crew: all agents, the manager task, sequential process."""
        print("")
        print(f"π€ Crew planning: {PLANNING_CREW}")
        print(f"π€ Crew memory: {MEMORY_CREW}")
        print(f"π€ Crew verbose: {VERBOSE_CREW}")
        print(f"π§ Crew planning LLM: {LLM_CREW_PLANNING}")

        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            planning_llm=LLM_CREW_PLANNING,
            planning=PLANNING_CREW,
            memory=MEMORY_CREW,
            verbose=VERBOSE_CREW,
            tracing=False  # tracing handled externally via Phoenix/OTel
        )
|
|
|
|
|
def _get_final_question(question, file_path):
    """Build the question text handed to the crew.

    If ``file_path`` points to a structured-data file (CSV/Excel/JSON/JSONL),
    its contents are read as JSON and inlined into the question so agents can
    reason over the data directly; any other file is referenced by path only.

    Args:
        question: The original question text.
        file_path: Optional path to an attached file (falsy when absent).

    Returns:
        The (possibly augmented) question string.
    """
    if not file_path:
        return question

    # any() over a tuple replaces the original five-way `or` chain.
    structured_exts = (".csv", ".xls", ".xlsx", ".json", ".jsonl")
    if any(is_ext(file_path, ext) for ext in structured_exts):
        json_data = read_file_json(file_path)
        return f"{question} JSON data:\n{json_data}."

    return f"{question} File path: {file_path}."
|
|
|
|
|
def run_crew(question, file_path):
    """Execute the GAIA crew on one question and return the final answer.

    The question is first augmented with the attached file's data or path,
    the crew is kicked off, and the raw crew output is post-processed by
    AITools.final_answer_tool into the final answer.
    """
    print("")
    print(f"π€ Question: {question}")

    enriched_question = _get_final_question(question, file_path)

    print("")
    print("π€ Crew execution started")

    crew_output = GAIACrew().crew().kickoff(inputs={"question": enriched_question})

    print("")
    print("π€ Crew execution completed")

    final_answer = AITools.final_answer_tool(question, crew_output)

    print("")
    print(f"π€ Answer: {final_answer}")

    return final_answer