eaglelandsonce committed on
Commit
e5c3819
1 Parent(s): 32817f7

Upload 32 files

Browse files
crewai/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from crewai.agent import Agent
2
+ from crewai.crew import Crew
3
+ from crewai.process import Process
4
+ from crewai.task import Task
crewai/agent.py ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+ from typing import Any, List, Optional
3
+
4
+ from langchain.agents.format_scratchpad import format_log_to_str
5
+ from langchain.memory import ConversationSummaryMemory
6
+ from langchain.tools.render import render_text_description
7
+ from langchain_core.runnables.config import RunnableConfig
8
+ from langchain_openai import ChatOpenAI
9
+ from pydantic import (
10
+ UUID4,
11
+ BaseModel,
12
+ ConfigDict,
13
+ Field,
14
+ InstanceOf,
15
+ field_validator,
16
+ model_validator,
17
+ )
18
+ from pydantic_core import PydanticCustomError
19
+
20
+ from crewai.agents import (
21
+ CacheHandler,
22
+ CrewAgentExecutor,
23
+ CrewAgentOutputParser,
24
+ ToolsHandler,
25
+ )
26
+ from crewai.prompts import Prompts
27
+
28
+
29
class Agent(BaseModel):
    """Represents an agent in a system.

    Each agent has a role, a goal, a backstory, and an optional language
    model (llm). The agent can also have memory, can operate in verbose
    mode, and can delegate tasks to other agents.

    Attributes:
        agent_executor: An instance of the CrewAgentExecutor class.
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        llm: The language model that will run the agent.
        max_iter: Maximum number of iterations for an agent to execute a task.
        memory: Whether the agent should have memory or not.
        verbose: Whether the agent execution should be in verbose mode.
        allow_delegation: Whether the agent is allowed to delegate tasks
            to other agents.
    """

    __hash__ = object.__hash__
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    llm: Optional[Any] = Field(
        default_factory=lambda: ChatOpenAI(
            temperature=0.7,
            model_name="gpt-4",
        ),
        description="Language model that will run the agent.",
    )
    memory: bool = Field(
        default=True, description="Whether the agent should have memory or not"
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: List[Any] = Field(
        default_factory=list, description="Tools at agents disposal"
    )
    max_iter: Optional[int] = Field(
        default=15, description="Maximum iterations for an agent to execute a task"
    )
    agent_executor: Optional[InstanceOf[CrewAgentExecutor]] = Field(
        default=None, description="An instance of the CrewAgentExecutor class."
    )
    tools_handler: Optional[InstanceOf[ToolsHandler]] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )
    # default_factory (not default=CacheHandler()) so each Agent gets its own
    # cache instance; the original evaluated CacheHandler() once at class
    # definition time, silently sharing one cache across every Agent.
    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
        default_factory=CacheHandler,
        description="An instance of the CacheHandler class.",
    )

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        """Reject any user-supplied value for the auto-generated ``id`` field."""
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def check_agent_executor(self) -> "Agent":
        """Lazily build the agent executor once the model is fully validated."""
        if not self.agent_executor:
            self.set_cache_handler(self.cache_handler)
        return self

    def execute_task(
        self,
        task: str,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute a task with the agent.

        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.

        Returns:
            Output of the agent
        """
        if context:
            task = "\n".join(
                [task, "\nThis is the context you are working with:", context]
            )

        tools = tools or self.tools
        # Keep the executor's tool list in sync with the tools for this call.
        self.agent_executor.tools = tools

        return self.agent_executor.invoke(
            {
                "input": task,
                "tool_names": self.__tools_names(tools),
                "tools": render_text_description(tools),
            },
            RunnableConfig(callbacks=[self.tools_handler]),
        )["output"]

    def set_cache_handler(self, cache_handler: CacheHandler) -> None:
        """Attach a cache handler and rebuild the executor to use it."""
        self.cache_handler = cache_handler
        self.tools_handler = ToolsHandler(cache=self.cache_handler)
        self.__create_agent_executor()

    def __create_agent_executor(self) -> None:
        """Create an agent executor and store it on ``self.agent_executor``.

        (The original annotated a CrewAgentExecutor return type but never
        returned one; the executor is assigned, not returned.)
        """
        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        executor_args = {
            "tools": self.tools,
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
        }

        if self.memory:
            summary_memory = ConversationSummaryMemory(
                llm=self.llm, memory_key="chat_history", input_key="input"
            )
            executor_args["memory"] = summary_memory
            agent_args["chat_history"] = lambda x: x["chat_history"]
            prompt = Prompts().task_execution_with_memory()
        else:
            prompt = Prompts().task_execution()

        execution_prompt = prompt.partial(
            goal=self.goal,
            role=self.role,
            backstory=self.backstory,
        )

        # Stop generation at the observation marker so the output parser sees
        # a single thought/action step at a time.
        bind = self.llm.bind(stop=["\nObservation"])
        inner_agent = (
            agent_args
            | execution_prompt
            | bind
            | CrewAgentOutputParser(
                tools_handler=self.tools_handler, cache=self.cache_handler
            )
        )
        self.agent_executor = CrewAgentExecutor(agent=inner_agent, **executor_args)

    @staticmethod
    def __tools_names(tools) -> str:
        """Render a comma-separated list of tool names for prompt injection."""
        return ", ".join([t.name for t in tools])
187
+
188
+
189
+
190
+ '''
191
+
192
+ import uuid
193
+ from typing import Any, List, Optional
194
+
195
+ from langchain.prompts.chat import (
196
+ ChatPromptTemplate,
197
+ HumanMessagePromptTemplate,
198
+ SystemMessagePromptTemplate,
199
+ )
200
+ from langchain.schema import HumanMessage, SystemMessage
201
+ from langchain_openai import ChatOpenAI
202
+
203
+ # from langchain_google_genai import ChatGoogleGenerativeAI
204
+
205
+
206
+
207
+ from langchain.agents.format_scratchpad import format_log_to_str
208
+ from langchain.memory import ConversationSummaryMemory
209
+
210
+
211
+ from langchain.tools.render import render_text_description
212
+ from langchain_core.runnables.config import RunnableConfig
213
+ from pydantic import (
214
+
215
+
216
+ UUID4,
217
+ BaseModel,
218
+ ConfigDict,
219
+ Field,
220
+ InstanceOf,
221
+ field_validator,
222
+ model_validator,
223
+ )
224
+ from pydantic_core import PydanticCustomError
225
+
226
+ from crewai.agents import (
227
+ CacheHandler,
228
+ CrewAgentExecutor,
229
+ CrewAgentOutputParser,
230
+ ToolsHandler,
231
+ )
232
+ from crewai.prompts import Prompts
233
+
234
+
235
+ class Agent(BaseModel):
236
+ """Represents an agent in a system.
237
+
238
+ Each agent has a role, a goal, a backstory, and an optional language model (llm).
239
+ The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
240
+
241
+ Attributes:
242
+ agent_executor: An instance of the CrewAgentExecutor class.
243
+ role: The role of the agent.
244
+ goal: The objective of the agent.
245
+ backstory: The backstory of the agent.
246
+ llm: The language model that will run the agent.
247
+ memory: Whether the agent should have memory or not.
248
+ verbose: Whether the agent execution should be in verbose mode.
249
+ allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
250
+ """
251
+
252
+ __hash__ = object.__hash__
253
+ model_config = ConfigDict(arbitrary_types_allowed=True)
254
+ id: UUID4 = Field(
255
+ default_factory=uuid.uuid4,
256
+ frozen=True,
257
+ description="Unique identifier for the object, not set by user.",
258
+ )
259
+ role: str = Field(description="Role of the agent")
260
+ goal: str = Field(description="Objective of the agent")
261
+ backstory: str = Field(description="Backstory of the agent")
262
+ llm: Optional[Any] = Field(
263
+ default_factory=lambda: ChatOpenAI(
264
+ temperature=0.7,
265
+ model_name="gpt-4",
266
+ ),
267
+ description="Language model that will run the agent.",
268
+ )
269
+ memory: bool = Field(
270
+ default=True, description="Whether the agent should have memory or not"
271
+ )
272
+ verbose: bool = Field(
273
+ default=False, description="Verbose mode for the Agent Execution"
274
+ )
275
+ allow_delegation: bool = Field(
276
+ default=True, description="Allow delegation of tasks to agents"
277
+ )
278
+ tools: List[Any] = Field(
279
+ default_factory=list, description="Tools at agents disposal"
280
+ )
281
+ agent_executor: Optional[InstanceOf[CrewAgentExecutor]] = Field(
282
+ default=None, description="An instance of the CrewAgentExecutor class."
283
+ )
284
+ tools_handler: Optional[InstanceOf[ToolsHandler]] = Field(
285
+ default=None, description="An instance of the ToolsHandler class."
286
+ )
287
+ cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
288
+ default=CacheHandler(), description="An instance of the CacheHandler class."
289
+ )
290
+
291
+ @field_validator("id", mode="before")
292
+ @classmethod
293
+ def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
294
+ if v:
295
+ raise PydanticCustomError(
296
+ "may_not_set_field", "This field is not to be set by the user.", {}
297
+ )
298
+
299
+ @model_validator(mode="after")
300
+ def check_agent_executor(self) -> "Agent":
301
+ if not self.agent_executor:
302
+ self.set_cache_handler(self.cache_handler)
303
+ return self
304
+
305
+ def execute_task(
306
+ self, task: str, context: str = None, tools: List[Any] = None
307
+ ) -> str:
308
+ """Execute a task with the agent.
309
+
310
+ Args:
311
+ task: Task to execute.
312
+ context: Context to execute the task in.
313
+ tools: Tools to use for the task.
314
+
315
+ Returns:
316
+ Output of the agent
317
+ """
318
+ if context:
319
+ task = "\n".join(
320
+ [task, "\nThis is the context you are working with:", context]
321
+ )
322
+
323
+ tools = tools or self.tools
324
+ self.agent_executor.tools = tools
325
+
326
+ return self.agent_executor.invoke(
327
+ {
328
+ "input": task,
329
+ "tool_names": self.__tools_names(tools),
330
+ "tools": render_text_description(tools),
331
+ },
332
+ RunnableConfig(callbacks=[self.tools_handler]),
333
+ )["output"]
334
+
335
+ def set_cache_handler(self, cache_handler) -> None:
336
+ self.cache_handler = cache_handler
337
+ self.tools_handler = ToolsHandler(cache=self.cache_handler)
338
+ self.__create_agent_executor()
339
+
340
+ def __create_agent_executor(self) -> CrewAgentExecutor:
341
+ """Create an agent executor for the agent.
342
+
343
+ Returns:
344
+ An instance of the CrewAgentExecutor class.
345
+ """
346
+ agent_args = {
347
+ "input": lambda x: x["input"],
348
+ "tools": lambda x: x["tools"],
349
+ "tool_names": lambda x: x["tool_names"],
350
+ "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
351
+ }
352
+ executor_args = {
353
+ "tools": self.tools,
354
+ "verbose": self.verbose,
355
+ "handle_parsing_errors": True,
356
+ }
357
+
358
+ if self.memory:
359
+ summary_memory = ConversationSummaryMemory(
360
+ llm=self.llm, memory_key="chat_history", input_key="input"
361
+ )
362
+ executor_args["memory"] = summary_memory
363
+ agent_args["chat_history"] = lambda x: x["chat_history"]
364
+ prompt = Prompts.TASK_EXECUTION_WITH_MEMORY_PROMPT
365
+ else:
366
+ prompt = Prompts.TASK_EXECUTION_PROMPT
367
+
368
+ execution_prompt = prompt.partial(
369
+ goal=self.goal,
370
+ role=self.role,
371
+ backstory=self.backstory,
372
+ )
373
+
374
+ bind = self.llm.bind(stop=["\nObservation"])
375
+ inner_agent = (
376
+ agent_args
377
+ | execution_prompt
378
+ | bind
379
+ | CrewAgentOutputParser(
380
+ tools_handler=self.tools_handler, cache=self.cache_handler
381
+ )
382
+ )
383
+ self.agent_executor = CrewAgentExecutor(agent=inner_agent, **executor_args)
384
+
385
+ @staticmethod
386
+ def __tools_names(tools) -> str:
387
+ return ", ".join([t.name for t in tools])
388
+
389
+ '''
crewai/agents/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .cache.cache_handler import CacheHandler
2
+ from .executor import CrewAgentExecutor
3
+ from .output_parser import CrewAgentOutputParser
4
+ from .tools_handler import ToolsHandler
crewai/agents/cache/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .cache_handler import CacheHandler
2
+ from .cache_hit import CacheHit
crewai/agents/cache/cache_handler.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from pydantic import PrivateAttr
4
+
5
+
6
class CacheHandler:
    """Simple in-memory cache for tool usage.

    Maps a ``"<tool>-<input>"`` key to the tool's output so that repeated
    calls with the same input can be served without re-running the tool.
    """

    def __init__(self) -> None:
        # Per-instance store. The original also declared a class-level
        # `_cache: PrivateAttr = {}`, but PrivateAttr is a pydantic construct
        # that does nothing on a plain class, and the shared class-level dict
        # was misleading — __init__ always shadowed it anyway.
        self._cache: dict = {}

    def add(self, tool: str, input: str, output: str) -> None:
        """Store *output* under the (tool, stripped input) pair."""
        input = input.strip()
        self._cache[f"{tool}-{input}"] = output

    def read(self, tool: str, input: str) -> Optional[str]:
        """Return the cached output for (tool, stripped input), or None."""
        input = input.strip()
        return self._cache.get(f"{tool}-{input}")
crewai/agents/cache/cache_hit.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+ from .cache_handler import CacheHandler
6
+
7
+
8
class CacheHit(BaseModel):
    """Cache Hit Object.

    Carries the agent action that matched a cached result together with the
    cache handler holding that result, so the executor can serve the answer
    from cache instead of re-running the tool.
    """

    class Config:
        arbitrary_types_allowed = True

    # Making it Any instead of AgentAction to avoid
    # pydantic v1 vs v2 incompatibility, langchain should
    # soon be updated to pydantic v2
    action: Any = Field(description="Action taken")
    cache: CacheHandler = Field(description="Cache Handler for the tool")
crewai/agents/exceptions.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.exceptions import OutputParserException
2
+
3
+
4
class TaskRepeatedUsageException(OutputParserException):
    """Exception raised when a task is used twice in a roll."""

    error: str = "TaskRepeatedUsageException"
    message: str = "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"

    def __init__(self, tool: str, tool_input: str, text: str):
        # Keep the raw pieces available on the instance for callers.
        self.tool, self.tool_input, self.text = tool, tool_input, text
        # Specialize the class-level message template for this repeat.
        self.message = self.message.format(tool=tool, tool_input=tool_input)

        super().__init__(
            error=self.error,
            observation=self.message,
            send_to_llm=True,
            llm_output=self.text,
        )

    def __str__(self) -> str:
        return self.message
crewai/agents/executor.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ from textwrap import dedent
3
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
4
+
5
+ from langchain.agents import AgentExecutor
6
+ from langchain.agents.agent import ExceptionTool
7
+ from langchain.agents.tools import InvalidTool
8
+ from langchain.callbacks.manager import CallbackManagerForChainRun
9
+ from langchain_core.agents import AgentAction, AgentFinish, AgentStep
10
+ from langchain_core.exceptions import OutputParserException
11
+ from langchain_core.pydantic_v1 import root_validator
12
+ from langchain_core.tools import BaseTool
13
+ from langchain_core.utils.input import get_color_mapping
14
+
15
+ from ..tools.cache_tools import CacheTools
16
+ from .cache.cache_hit import CacheHit
17
+
18
+
19
class CrewAgentExecutor(AgentExecutor):
    """AgentExecutor variant with an iteration cap and cache-hit support.

    Extends langchain's AgentExecutor with two behaviors:
      * once the agent has nearly exhausted its iteration budget it is
        forced to produce its best final answer instead of using more tools;
      * actions wrapped in a CacheHit are rerouted to CacheTools so the
        cached result is returned instead of re-running the original tool.
    """

    # Loop counter, reset at the start of every _call.
    iterations: int = 0
    max_iterations: Optional[int] = 15
    # Iteration index at which the agent is told to stop using tools;
    # derived from max_iterations by the validator below.
    force_answer_max_iterations: Optional[int] = None

    @root_validator()
    def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
        """Derive the forced-answer threshold (max_iterations - 2)."""
        values["force_answer_max_iterations"] = values["max_iterations"] - 2
        return values

    def _should_force_answer(self) -> bool:
        """Whether the current iteration has hit the forced-answer threshold."""
        return True if self.iterations == self.force_answer_max_iterations else False

    def _force_answer(self, output: AgentAction):
        """Wrap *output* in a step whose observation tells the agent to finish."""
        return AgentStep(
            action=output,
            observation=dedent(
                """\
                I've used too many tools for this task.
                I'm going to give you my absolute BEST Final answer now and
                not use any more tools."""
            ),
        )

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name for tool in self.tools], excluded_colors=["green", "red"]
        )
        intermediate_steps: List[Tuple[AgentAction, str]] = []
        # Let's start tracking the number of iterations and time elapsed
        self.iterations = 0
        time_elapsed = 0.0
        start_time = time.time()
        # We now enter the agent loop (until it returns something).
        while self._should_continue(self.iterations, time_elapsed):
            next_step_output = self._take_next_step(
                name_to_tool_map,
                color_mapping,
                inputs,
                intermediate_steps,
                run_manager=run_manager,
            )
            if isinstance(next_step_output, AgentFinish):
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
                )

            intermediate_steps.extend(next_step_output)
            if len(next_step_output) == 1:
                next_step_action = next_step_output[0]
                # See if tool should return directly
                tool_return = self._get_tool_return(next_step_action)
                if tool_return is not None:
                    return self._return(
                        tool_return, intermediate_steps, run_manager=run_manager
                    )
            self.iterations += 1
            time_elapsed = time.time() - start_time
        # Budget exhausted: ask the agent for a stopped response instead of
        # raising, mirroring the base AgentExecutor behavior.
        output = self.agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
        )
        return self._return(output, intermediate_steps, run_manager=run_manager)

    def _iter_next_step(
        self,
        name_to_tool_map: Dict[str, BaseTool],
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        try:
            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)

            # Call the LLM to see what to do.
            output = self.agent.plan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )
            # If the iteration budget is nearly spent, force a final answer
            # regardless of what the agent planned.
            if self._should_force_answer():
                if isinstance(output, AgentAction):
                    output = output
                else:
                    # Presumably a CacheHit wrapper — unwrap its action.
                    output = output.action
                yield self._force_answer(output)
                return

        except OutputParserException as e:
            if isinstance(self.handle_parsing_errors, bool):
                raise_error = not self.handle_parsing_errors
            else:
                raise_error = False
            if raise_error:
                raise ValueError(
                    "An output parsing error occurred. "
                    "In order to pass this error back to the agent and have it try "
                    "again, pass `handle_parsing_errors=True` to the AgentExecutor. "
                    f"This is the error: {str(e)}"
                )
            text = str(e)
            if isinstance(self.handle_parsing_errors, bool):
                if e.send_to_llm:
                    observation = str(e.observation)
                    text = str(e.llm_output)
                else:
                    observation = "Invalid or incomplete response"
            elif isinstance(self.handle_parsing_errors, str):
                observation = self.handle_parsing_errors
            elif callable(self.handle_parsing_errors):
                observation = self.handle_parsing_errors(e)
            else:
                raise ValueError("Got unexpected type of `handle_parsing_errors`")
            output = AgentAction("_Exception", observation, text)
            if run_manager:
                run_manager.on_agent_action(output, color="green")
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = ExceptionTool().run(
                output.tool_input,
                verbose=self.verbose,
                color=None,
                callbacks=run_manager.get_child() if run_manager else None,
                **tool_run_kwargs,
            )

            # The forced-answer cutoff applies on the error path too.
            if self._should_force_answer():
                yield self._force_answer(output)
                return

            yield AgentStep(action=output, observation=observation)
            return

        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            yield output
            return

        # Override tool usage to use CacheTools
        if isinstance(output, CacheHit):
            cache = output.cache
            action = output.action
            tool = CacheTools(cache_handler=cache).tool()
            output = action.copy()
            # Re-encode the original call so CacheTools can look it up.
            output.tool_input = f"tool:{action.tool}|input:{action.tool_input}"
            output.tool = tool.name
            name_to_tool_map[tool.name] = tool
            color_mapping[tool.name] = color_mapping[action.tool]

        actions: List[AgentAction]
        actions = [output] if isinstance(output, AgentAction) else output
        yield from actions
        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, color="green")
            # Otherwise we lookup the tool
            if agent_action.tool in name_to_tool_map:
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                if return_direct:
                    tool_run_kwargs["llm_prefix"] = ""
                # We then call the tool on the tool input to get an observation
                observation = tool.run(
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = InvalidTool().run(
                    {
                        "requested_tool_name": agent_action.tool,
                        "available_tool_names": list(name_to_tool_map.keys()),
                    },
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            yield AgentStep(action=agent_action, observation=observation)
crewai/agents/output_parser.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from typing import Union
3
+
4
+ from langchain.agents.output_parsers import ReActSingleInputOutputParser
5
+ from langchain_core.agents import AgentAction, AgentFinish
6
+
7
+ from .cache import CacheHandler, CacheHit
8
+ from .exceptions import TaskRepeatedUsageException
9
+ from .tools_handler import ToolsHandler
10
+
11
+ FINAL_ANSWER_ACTION = "Final Answer:"
12
+ FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
13
+ "Parsing LLM output produced both a final answer and a parse-able action:"
14
+ )
15
+
16
+
17
class CrewAgentOutputParser(ReActSingleInputOutputParser):
    """Parses ReAct-style LLM calls that have a single tool input.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thought: agent thought here
    Action: search
    Action Input: what is the temperature in SF?
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thought: agent thought here
    Final Answer: The temperature is 100 degrees
    ```

    It also prevents tools from being reused in a roll.
    """

    class Config:
        arbitrary_types_allowed = True

    tools_handler: ToolsHandler
    cache: CacheHandler

    def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
        """Parse *text*, intercepting repeated tool calls and cache hits.

        Raises:
            TaskRepeatedUsageException: if the action repeats the last
                tool/input pair verbatim.
        """
        # NOTE: the original evaluated `FINAL_ANSWER_ACTION in text` as a
        # bare no-op expression; removed — final-answer handling is done by
        # super().parse() below.
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        if action_match := re.search(regex, text, re.DOTALL):
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")
            tool_input = tool_input.strip('"')

            # Refuse to run the exact same tool with the same input twice
            # in a row; bounce the agent back with an explanatory message.
            if last_tool_usage := self.tools_handler.last_used_tool:
                usage = {
                    "tool": action,
                    "input": tool_input,
                }
                if usage == last_tool_usage:
                    raise TaskRepeatedUsageException(
                        tool=action, tool_input=tool_input, text=text
                    )

            # Serve previously computed results from the cache (the result
            # itself is fetched later by CacheTools; only existence matters).
            if self.cache.read(action, tool_input):
                action = AgentAction(action, tool_input, text)
                return CacheHit(action=action, cache=self.cache)

        return super().parse(text)
crewai/agents/tools_handler.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict
2
+
3
+ from langchain.callbacks.base import BaseCallbackHandler
4
+
5
+ from ..tools.cache_tools import CacheTools
6
+ from .cache.cache_handler import CacheHandler
7
+
8
+
9
class ToolsHandler(BaseCallbackHandler):
    """Callback handler for tool usage.

    Tracks the most recent tool invocation and writes successful tool
    outputs into the shared cache so the output parser can detect repeats
    and serve cached results.
    """

    # Class-level declarations kept for interface compatibility; both are
    # overwritten per instance in __init__.
    last_used_tool: Dict[str, Any] = {}
    cache: CacheHandler = None

    def __init__(self, cache: CacheHandler = None, **kwargs: Any):
        """Initialize the callback handler."""
        self.cache = cache
        # Per-instance state: without this, the class-level dict above is
        # shared (and mutated) across every ToolsHandler instance.
        self.last_used_tool = {}
        super().__init__(**kwargs)

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Run when tool starts running."""
        name = serialized.get("name")
        # Ignore langchain's synthetic error/invalid-tool pseudo-tools.
        if name not in ["invalid_tool", "_Exception"]:
            tools_usage = {
                "tool": name,
                "input": input_str,
            }
            self.last_used_tool = tools_usage

    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
        """Run when tool ends running."""
        # Only cache real, successful tool outputs — skip error sentinels.
        if (
            "is not a valid tool" not in output
            and "Invalid or incomplete response" not in output
            and "Invalid Format" not in output
        ):
            # Never cache the cache-reader tool itself.
            if self.last_used_tool["tool"] != CacheTools().name:
                self.cache.add(
                    tool=self.last_used_tool["tool"],
                    input=self.last_used_tool["input"],
                    output=output,
                )
crewai/crew.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import uuid
3
+ from typing import Any, Dict, List, Optional, Union
4
+
5
+ from pydantic import (
6
+ UUID4,
7
+ BaseModel,
8
+ ConfigDict,
9
+ Field,
10
+ InstanceOf,
11
+ Json,
12
+ field_validator,
13
+ model_validator,
14
+ )
15
+ from pydantic_core import PydanticCustomError
16
+
17
+ from crewai.agent import Agent
18
+ from crewai.agents.cache import CacheHandler
19
+ from crewai.process import Process
20
+ from crewai.task import Task
21
+ from crewai.tools.agent_tools import AgentTools
22
+
23
+
24
class Crew(BaseModel):
    """
    Represents a group of agents, defining how they should collaborate and the tasks they should perform.

    Attributes:
        tasks: List of tasks assigned to the crew.
        agents: List of agents part of this crew.
        process: The process flow that the crew will follow (e.g., sequential).
        verbose: Indicates the verbosity level for logging during execution.
        config: Configuration settings for the crew.
        cache_handler: Handles caching for the crew's operations.
        id: A unique identifier for the crew instance.
    """

    __hash__ = object.__hash__
    model_config = ConfigDict(arbitrary_types_allowed=True)
    tasks: List[Task] = Field(default_factory=list)
    agents: List[Agent] = Field(default_factory=list)
    process: Process = Field(default=Process.sequential)
    verbose: Union[int, bool] = Field(default=0)
    config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
    # default_factory so each Crew gets its own cache; the original
    # `default=CacheHandler()` created one shared instance for all crews.
    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
        default_factory=CacheHandler
    )
    id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        """Prevent manual setting of the 'id' field by users."""
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "The 'id' field cannot be set by the user.", {}
            )

    # FIX: @field_validator must be the outermost decorator in pydantic v2;
    # the original had @classmethod on top, so this validator never took
    # effect. Also replaced the original `isinstance(v, Json)` check —
    # Json is an Annotated alias and raises TypeError under isinstance —
    # with the intended "parse JSON strings, pass dicts through" check.
    @field_validator("config", mode="before")
    @classmethod
    def check_config_type(cls, v: Union[Json, Dict[str, Any]]):
        """Coerce a JSON string config into a dict; dicts pass through."""
        return json.loads(v) if isinstance(v, str) else v

    @model_validator(mode="after")
    def check_config(self):
        """Validates that the crew is properly configured with agents and tasks."""
        if not self.config and not self.tasks and not self.agents:
            raise PydanticCustomError(
                "missing_keys",
                "Either 'agents' and 'tasks' need to be set or 'config'.",
                {},
            )

        if self.config:
            self._setup_from_config()

        if self.agents:
            # Every agent shares the crew-wide cache.
            for agent in self.agents:
                agent.set_cache_handler(self.cache_handler)
        return self

    def _setup_from_config(self):
        """Initializes agents and tasks from the provided config."""
        if not self.config.get("agents") or not self.config.get("tasks"):
            raise PydanticCustomError(
                "missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {}
            )

        self.agents = [Agent(**agent) for agent in self.config["agents"]]
        self.tasks = [self._create_task(task) for task in self.config["tasks"]]

    def _create_task(self, task_config):
        """Creates a task instance from its configuration.

        The task's "agent" entry names the role of one of this crew's agents.
        """
        task_agent = next(
            agt for agt in self.agents if agt.role == task_config["agent"]
        )
        del task_config["agent"]
        return Task(**task_config, agent=task_agent)

    def kickoff(self) -> str:
        """Starts the crew to work on its assigned tasks."""
        for agent in self.agents:
            agent.cache_handler = self.cache_handler

        if self.process == Process.sequential:
            return self._sequential_loop()

    def _sequential_loop(self) -> str:
        """Executes tasks sequentially and returns the final output.

        Each task receives the previous task's output as its context.
        """
        task_output = None
        for task in self.tasks:
            self._prepare_and_execute_task(task)
            task_output = task.execute(task_output)
            self._log(
                "debug", f"\n\n[{task.agent.role}] Task output: {task_output}\n\n"
            )
        return task_output

    def _prepare_and_execute_task(self, task):
        """Prepares and logs information about the task being executed."""
        if task.agent.allow_delegation:
            # Delegation tools let this agent hand work to its crew mates.
            task.tools += AgentTools(agents=self.agents).tools()

        self._log("debug", f"Working Agent: {task.agent.role}")
        self._log("info", f"Starting Task: {task.description}")

    def _log(self, level, message):
        """Logs a message at the specified verbosity level.

        verbose=True is treated as level 2 (everything); integers compare
        against the level map ("debug"=1, "info"=2).
        """
        level_map = {"debug": 1, "info": 2}
        verbose_level = (
            2 if isinstance(self.verbose, bool) and self.verbose else self.verbose
        )
        if verbose_level and level_map[level] <= verbose_level:
            print(message)
crewai/process.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+
3
+
4
class Process(str, Enum):
    """Strategies a crew can use to work through its tasks.

    Only the sequential strategy exists today; consensual and hierarchical
    strategies are planned (see TODOs below).
    """

    sequential = "sequential"
    # TODO: consensual = 'consensual'
    # TODO: hierarchical = 'hierarchical'
crewai/prompts.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
from typing import ClassVar, Dict, List, Optional

from langchain.prompts import PromptTemplate
from pydantic import BaseModel, Field, PrivateAttr, ValidationError, model_validator
7
+
8
+
9
class Prompts(BaseModel):
    """Manages and generates prompts for a generic agent with support for different languages."""

    # Raw prompt slices loaded from the per-language JSON file.
    _prompts: Optional[Dict[str, str]] = PrivateAttr()
    language: Optional[str] = Field(
        default="en",
        description="Language of the prompts.",
    )

    @model_validator(mode="after")
    def load_prompts(self) -> "Prompts":
        """Load prompt slices from the JSON file matching ``self.language``.

        Raises:
            ValueError: If the prompt file is missing or contains invalid JSON.
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        prompts_path = os.path.join(dir_path, f"prompts/{self.language}.json")
        try:
            with open(prompts_path, "r") as f:
                self._prompts = json.load(f)["slices"]
        except FileNotFoundError as e:
            # NOTE: pydantic v2's ValidationError cannot be instantiated from
            # a plain message (the old `raise ValidationError(...)` crashed
            # with a TypeError), so raise ValueError instead.
            raise ValueError(
                f"Prompt file for language '{self.language}' not found."
            ) from e
        except json.JSONDecodeError as e:
            raise ValueError("Error decoding JSON from the prompts file.") from e
        return self

    # Appended to every prompt so the agent scratchpad is always present.
    SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"

    def task_execution_with_memory(self) -> PromptTemplate:
        """Generate a prompt for task execution with memory components."""
        return self._build_prompt(["role_playing", "tools", "memory", "task"])

    def task_execution_without_tools(self) -> PromptTemplate:
        """Generate a prompt for task execution without tools components."""
        return self._build_prompt(["role_playing", "task"])

    def task_execution(self) -> PromptTemplate:
        """Generate a standard prompt for task execution."""
        return self._build_prompt(["role_playing", "tools", "task"])

    def _build_prompt(self, components: List[str]) -> PromptTemplate:
        """Construct a PromptTemplate from the named prompt slices.

        Unknown component names are silently skipped; the scratchpad slice
        is always appended last.
        """
        prompt_parts = [
            self._prompts[component]
            for component in components
            if component in self._prompts
        ]
        prompt_parts.append(self.SCRATCHPAD_SLICE)
        return PromptTemplate.from_template("".join(prompt_parts))
crewai/prompts/en.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "slices": {
3
+ "task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}",
4
+ "memory": "This is the summary of your work so far:\n{chat_history}",
5
+ "role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
6
+ "tools": "TOOLS:\n------\nYou have access to the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]"
7
+ }
8
+ }
crewai/resources/.DS_Store ADDED
Binary file (6.15 kB). View file
 
crewai/resources/clarifai.png ADDED
crewai/resources/smartmix.jpg ADDED
crewai/resources/smartmix2.jpg ADDED
crewai/task.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+ from typing import Any, List, Optional
3
+
4
+ from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
5
+ from pydantic_core import PydanticCustomError
6
+
7
+ from crewai.agent import Agent
8
+ from crewai.tasks.task_output import TaskOutput
9
+
10
+
11
class Task(BaseModel):
    """Class that represent a task to be executed."""

    # Pydantic v2 models are unhashable when fields are mutable; restore
    # identity hashing so Task instances can live in sets/dict keys.
    __hash__ = object.__hash__
    description: str = Field(description="Description of the actual task.")
    agent: Optional[Agent] = Field(
        description="Agent responsible for the task.", default=None
    )
    tools: List[Any] = Field(
        default_factory=list,
        description="Tools the agent are limited to use for this task.",
    )
    output: Optional[TaskOutput] = Field(
        description="Task output, it's final result.", default=None
    )
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        # Reject any caller-supplied id; default_factory assigns one instead.
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def check_tools(self):
        """Default this task's tools to its agent's tools when none were given."""
        if not self.tools and (self.agent and self.agent.tools):
            self.tools.extend(self.agent.tools)
        return self

    def execute(self, context: Optional[str] = None) -> str:
        """Execute the task.

        Args:
            context: Optional output of a previous task, forwarded to the
                agent as additional context.

        Returns:
            Output of the task.

        Raises:
            Exception: If no agent is assigned to this task.
        """
        if not self.agent:
            raise Exception(
                f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, either consensual or hierarchical."
            )
        # Delegate the actual work to the agent, limited to this task's tools.
        result = self.agent.execute_task(
            task=self.description, context=context, tools=self.tools
        )

        self.output = TaskOutput(description=self.description, result=result)
        return result
crewai/tasks/__init__.py ADDED
File without changes
crewai/tasks/task_output.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from pydantic import BaseModel, Field, model_validator
4
+
5
+
6
class TaskOutput(BaseModel):
    """Result produced by executing a single task."""

    description: str = Field(description="Description of the task")
    summary: Optional[str] = Field(description="Summary of the task", default=None)
    result: str = Field(description="Result of the task")

    @model_validator(mode="after")
    def set_summary(self):
        """Derive the summary from the first ten words of the description."""
        leading_words = self.description.split(" ")[:10]
        self.summary = " ".join(leading_words) + "..."
        return self
crewai/tools/__init__.py ADDED
File without changes
crewai/tools/agent_tools.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from textwrap import dedent
2
+ from typing import List
3
+
4
+ from langchain.tools import Tool
5
+ from pydantic import BaseModel, Field
6
+
7
+ from crewai.agent import Agent
8
+
9
+
10
class AgentTools(BaseModel):
    """Tools for generic agent."""

    # Delegation targets: every agent in the crew this toolset belongs to.
    agents: List[Agent] = Field(description="List of agents in this crew.")

    def tools(self):
        """Build the langchain Tools for delegating work and asking questions.

        Both tools share the pipe-separated "role|text|context" input
        contract and are backed by __execute.
        """
        return [
            Tool.from_function(
                func=self.delegate_work,
                name="Delegate work to co-worker",
                description=dedent(
                    f"""Useful to delegate a specific task to one of the
                    following co-workers: [{', '.join([agent.role for agent in self.agents])}].
                    The input to this tool should be a pipe (|) separated text of length
                    three, representing the role you want to delegate it to, the task and
                    information necessary. For example, `coworker|task|information`.
                    """
                ),
            ),
            Tool.from_function(
                func=self.ask_question,
                name="Ask question to co-worker",
                description=dedent(
                    f"""Useful to ask a question, opinion or take from on
                    of the following co-workers: [{', '.join([agent.role for agent in self.agents])}].
                    The input to this tool should be a pipe (|) separated text of length
                    three, representing the role you want to ask it to, the question and
                    information necessary. For example, `coworker|question|information`.
                    """
                ),
            ),
        ]

    def delegate_work(self, command):
        """Useful to delegate a specific task to a coworker."""
        return self.__execute(command)

    def ask_question(self, command):
        """Useful to ask a question, opinion or take from a coworker."""
        return self.__execute(command)

    def __execute(self, command):
        """Execute the command."""
        # Expected format: "role|task-or-question|context". Errors are
        # returned as strings rather than raised so the calling LLM can
        # read the message and retry with a corrected input.
        try:
            agent, task, information = command.split("|")
        except ValueError:
            return "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`.\n"

        if not agent or not task or not information:
            return "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|question|information`.\n"

        # Resolve the target agent by role (exact string match).
        agent = [
            available_agent
            for available_agent in self.agents
            if available_agent.role == agent
        ]

        if len(agent) == 0:
            return f"\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {', '.join([agent.role for agent in self.agents])}.\n"

        agent = agent[0]
        result = agent.execute_task(task, information)
        return result
crewai/tools/browser_tools.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import requests
5
+ from crewai import Agent, Task
6
+ from langchain.tools import tool
7
+ from unstructured.partition.html import partition_html
8
+
9
+
10
class BrowserTools():

    @tool("Scrape website content")
    def scrape_and_summarize_website(website):
        """Useful to scrape and summarize a website content"""
        # Render the page through the Browserless hosted-Chrome API.
        url = f"https://chrome.browserless.io/content?token={os.environ['BROWSERLESS_API_KEY']}"
        payload = json.dumps({"url": website})
        headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
        response = requests.request("POST", url, headers=headers, data=payload)
        # Reduce the returned HTML to plain-text elements.
        elements = partition_html(text=response.text)
        content = "\n\n".join([str(el) for el in elements])
        # Chunk into 8000-character pieces, presumably to fit the model's
        # context window — TODO confirm the limit matches the model used.
        content = [content[i:i + 8000] for i in range(0, len(content), 8000)]
        summaries = []
        for chunk in content:
            # A throwaway researcher agent summarizes each chunk independently.
            agent = Agent(
                role='Principal Researcher',
                goal=
                'Do amazing research and summaries based on the content you are working with',
                backstory=
                "You're a Principal Researcher at a big company and you need to do research about a given topic.",
                allow_delegation=False)
            task = Task(
                agent=agent,
                description=
                f'Analyze and summarize the content below, make sure to include the most relevant information in the summary, return only the summary nothing else.\n\nCONTENT\n----------\n{chunk}'
            )
            summary = task.execute()
            summaries.append(summary)
        # Per-chunk summaries are concatenated, not re-summarized.
        return "\n\n".join(summaries)
crewai/tools/cache_tools.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.tools import Tool
2
+ from pydantic import BaseModel, ConfigDict, Field
3
+
4
+ from crewai.agents.cache import CacheHandler
5
+
6
+
7
class CacheTools(BaseModel):
    """Default tools to hit the cache."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    name: str = "Hit Cache"
    cache_handler: CacheHandler = Field(
        description="Cache Handler for the crew",
        # default_factory gives each CacheTools its own handler; the previous
        # `default=CacheHandler()` shared one mutable handler object across
        # every instance of the model.
        default_factory=CacheHandler,
    )

    def tool(self):
        """Expose `hit_cache` as a langchain Tool."""
        return Tool.from_function(
            func=self.hit_cache,
            name=self.name,
            description="Reads directly from the cache",
        )

    def hit_cache(self, key):
        """Read a cached tool result.

        `key` is expected to look like "tool:<name>|input:<input>";
        both parts are stripped of surrounding whitespace before lookup.
        """
        split = key.split("tool:")
        tool = split[1].split("|input:")[0].strip()
        tool_input = split[1].split("|input:")[1].strip()
        return self.cache_handler.read(tool, tool_input)
crewai/tools/calculator_tools.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.tools import tool
2
+
3
+
4
class CalculatorTools():

    @tool("Make a calcualtion")
    def calculate(operation):
        """Useful to perform any mathematical calculations,
        like sum, minus, multiplication, division, etc.
        The input to this tool should be a mathematical
        expression, a couple examples are `200*7` or `5000/2*10`
        """
        # SECURITY: the original implementation ran `eval(operation)` on
        # LLM-supplied text, which permits arbitrary code execution.
        # Evaluate only whitelisted arithmetic via the `ast` module instead.
        import ast
        import operator as op

        binary_ops = {
            ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
            ast.Div: op.truediv, ast.FloorDiv: op.floordiv,
            ast.Mod: op.mod, ast.Pow: op.pow,
        }
        unary_ops = {ast.UAdd: op.pos, ast.USub: op.neg}

        def _eval(node):
            """Recursively evaluate a whitelisted arithmetic AST node."""
            if isinstance(node, ast.Expression):
                return _eval(node.body)
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return node.value
            if isinstance(node, ast.BinOp) and type(node.op) in binary_ops:
                return binary_ops[type(node.op)](_eval(node.left), _eval(node.right))
            if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
                return unary_ops[type(node.op)](_eval(node.operand))
            raise ValueError(f"Unsupported expression: {operation!r}")

        return _eval(ast.parse(operation, mode="eval"))
crewai/tools/clarifai_tools.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # clarify tools
crewai/tools/gemini_tools.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# tools created using gemini

import json
import os

import google.generativeai as genai
from google.api_core import exceptions

# Retrieve API Key from Environment Variable
GOOGLE_AI_STUDIO = os.environ.get('GOOGLE_API_KEY')

# Fail fast, naming the variable the code actually reads (the old message
# told users to set GOOGLE_AI_STUDIO2, which is never consulted).
if not GOOGLE_AI_STUDIO:
    raise ValueError("API key not found. Please set the GOOGLE_API_KEY environment variable.")

import requests
from langchain.tools import tool

# Configure the Gemini client once at import time.
genai.configure(api_key=GOOGLE_AI_STUDIO)
model = genai.GenerativeModel('gemini-pro')
22
+
23
class GeminiSearchTools():
    @tool("Gemini search the internet")
    def gemini_search(query):
        """
        Searches for content based on the provided query using the Gemini model.
        Handles DeadlineExceeded exceptions from the Google API.

        Args:
            query (str): The search query.

        Returns:
            str: The response text from the Gemini model or an error message.
        """
        try:
            return model.generate_content(query).text
        except exceptions.DeadlineExceeded as err:
            # Degrade gracefully on timeout instead of propagating.
            print("Error: Deadline Exceeded -", str(err))
            return "Error: The request timed out. Please try again later."

    @tool("Gemini search news on the internet")
    def gemini_search_news(query):
        """
        Searches for content based on the provided query using the Gemini model.
        Handles DeadlineExceeded exceptions from the Google API.

        Args:
            query (str): The search query.

        Returns:
            str: The response text from the Gemini model or an error message.
        """
        try:
            return model.generate_content(query).text
        except exceptions.DeadlineExceeded as err:
            # Degrade gracefully on timeout instead of propagating.
            print("Error: Deadline Exceeded -", str(err))
            return "Error: The request timed out. Please try again later."
crewai/tools/mixtral_tools.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# tools created using Mixtral

import json
import os

from huggingface_hub import InferenceClient
import gradio as gr

# Shared inference client for the Mixtral instruct model; created once at
# import time and reused by every tool call below.
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)
12
+
13
+ # Helper Method
14
+
15
def format_prompt(message, history):
    """Render a chat history plus the new message in instruct format.

    Args:
        message: The new user message to append.
        history: Iterable of (user_prompt, bot_response) pairs.

    Returns:
        The full "<s>[INST] ... [/INST]" formatted prompt string.
    """
    turns = [
        f"[INST] {user_prompt} [/INST] {bot_response}</s> "
        for user_prompt, bot_response in history
    ]
    return "<s>" + "".join(turns) + f"[INST] {message} [/INST]"
22
+
23
+
24
import requests
from langchain.tools import tool

# Global chat history shared by the tools below. NOTE(review): this is a
# string, but format_prompt iterates it expecting (user, bot) pairs — it only
# works because an empty string yields no items. Verify before populating.
history = ""
28
+
29
class MixtralSearchTools():
    @tool("Mixtral Normal")
    def mixtral_normal(prompt, histroy="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided query using the Mixtral model.
        Args:
            query (str): The search query.
        Returns:
            str: The response text from the Mixtral model or an error message.
        """
        # NOTE(review): the `histroy` (sic) parameter is never used; the
        # module-level `history` global is passed to format_prompt instead.
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        formatted_prompt = format_prompt(prompt, history)

        # Stream tokens and accumulate them into `output`.
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            # NOTE(review): the yield makes this function a generator; the
            # trailing `return output` only sets StopIteration.value.
            yield output
        return output


    @tool("Mixtral Crazy")
    def mixtral_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided query using the Mixtral model but has the gaurd rails removed,
        and responses are crazy and off the wall and sometimes scary.
        Args:
            query (str): The search query.
        Returns:
            str: The response text from the Mixtral model or an error message.
        """
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        # Unlike mixtral_normal, the raw prompt is sent without instruct
        # formatting or history.
        stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output
crewai/tools/search_tools.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import requests
5
+ from langchain.tools import tool
6
+
7
+
8
class SearchTools():
    @tool("Search the internet")
    def search_internet(query):
        """Useful to search the internet
        about a given topic and return relevant results"""
        top_result_to_return = 4
        url = "https://google.serper.dev/search"
        payload = json.dumps({"q": query})
        headers = {
            'X-API-KEY': os.environ['SERPER_API_KEY'],
            'content-type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        results = response.json()['organic']
        string = []
        for result in results[:top_result_to_return]:
            try:
                string.append('\n'.join([
                    f"Title: {result['title']}", f"Link: {result['link']}",
                    f"Snippet: {result['snippet']}", "\n-----------------"
                ]))
            except KeyError:
                # Skip results missing expected fields. (The old code's bare
                # `next` expression here was a no-op; `continue` is the intent.)
                continue

        return '\n'.join(string)

    @tool("Search news on the internet")
    def search_news(query):
        """Useful to search news about a company, stock or any other
        topic and return relevant results"""
        top_result_to_return = 4
        url = "https://google.serper.dev/news"
        payload = json.dumps({"q": query})
        headers = {
            'X-API-KEY': os.environ['SERPER_API_KEY'],
            'content-type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        results = response.json()['news']
        string = []
        for result in results[:top_result_to_return]:
            try:
                string.append('\n'.join([
                    f"Title: {result['title']}", f"Link: {result['link']}",
                    f"Snippet: {result['snippet']}", "\n-----------------"
                ]))
            except KeyError:
                # Skip results missing expected fields (see note above in
                # search_internet on the original `next` no-op).
                continue

        return '\n'.join(string)
crewai/tools/sec_tools.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import requests
4
+
5
+ from langchain.tools import tool
6
+ from langchain.text_splitter import CharacterTextSplitter
7
+ from langchain_community.embeddings import OpenAIEmbeddings
8
+ from langchain_community.vectorstores import FAISS
9
+
10
+ from sec_api import QueryApi
11
+ from unstructured.partition.html import partition_html
12
+
13
class SECTools():
    @tool("Search 10-Q form")
    def search_10q(data):
        """
        Useful to search information from the latest 10-Q form for a
        given stock.
        The input to this tool should be a pipe (|) separated text of
        length two, representing the stock ticker you are interested, what
        question you have from it.
        For example, `AAPL|what was last quarter's revenue`.
        """
        stock, ask = data.split("|")
        queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
        # Fetch the single most recent 10-Q filing for the ticker.
        query = {
            "query": {
                "query_string": {
                    "query": f"ticker:{stock} AND formType:\"10-Q\""
                }
            },
            "from": "0",
            "size": "1",
            "sort": [{ "filedAt": { "order": "desc" }}]
        }

        filings = queryApi.get_filings(query)['filings']
        # NOTE(review): filings[0] raises IndexError when no filing exists
        # for the ticker — confirm whether that should be handled.
        link = filings[0]['linkToFilingDetails']
        answer = SECTools.__embedding_search(link, ask)
        return answer

    @tool("Search 10-K form")
    def search_10k(data):
        """
        Useful to search information from the latest 10-K form for a
        given stock.
        The input to this tool should be a pipe (|) separated text of
        length two, representing the stock ticker you are interested, what
        question you have from it.
        For example, `AAPL|what was last year's revenue`.
        """
        stock, ask = data.split("|")
        queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
        # Fetch the single most recent 10-K filing for the ticker.
        query = {
            "query": {
                "query_string": {
                    "query": f"ticker:{stock} AND formType:\"10-K\""
                }
            },
            "from": "0",
            "size": "1",
            "sort": [{ "filedAt": { "order": "desc" }}]
        }

        filings = queryApi.get_filings(query)['filings']
        link = filings[0]['linkToFilingDetails']
        answer = SECTools.__embedding_search(link, ask)
        return answer

    def __embedding_search(url, ask):
        """Answer *ask* from the filing at *url* via embedding retrieval.

        Downloads the filing HTML, splits it into overlapping chunks,
        indexes them in an in-memory FAISS store, and returns the
        concatenated text of the most relevant chunks.
        """
        text = SECTools.__download_form_html(url)
        elements = partition_html(text=text)
        content = "\n".join([str(el) for el in elements])
        # 1000-char chunks with 150-char overlap keep sentences intact
        # across chunk boundaries.
        text_splitter = CharacterTextSplitter(
            separator = "\n",
            chunk_size = 1000,
            chunk_overlap = 150,
            length_function = len,
            is_separator_regex = False,
        )
        docs = text_splitter.create_documents([content])
        retriever = FAISS.from_documents(
            docs, OpenAIEmbeddings()
        ).as_retriever()
        answers = retriever.get_relevant_documents(ask, top_k=4)
        answers = "\n\n".join([a.page_content for a in answers])
        return answers

    def __download_form_html(url):
        """Fetch the filing page HTML with browser-like request headers.

        The headers mimic a desktop Chrome browser; presumably required
        because sec.gov rejects default library user agents — verify.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9,pt-BR;q=0.8,pt;q=0.7',
            'Cache-Control': 'max-age=0',
            'Dnt': '1',
            'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"macOS"',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        }

        response = requests.get(url, headers=headers)
        return response.text
crewai/tools/zephyr_tools.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# tools created using Zephyr

import json
import os

from huggingface_hub import InferenceClient
import gradio as gr

# Shared inference client for the Zephyr chat model; created once at import
# time and reused by every tool call below.
client = InferenceClient(
    "HuggingFaceH4/zephyr-7b-beta"
)
12
+
13
+ # Helper Method
14
+
15
def format_prompt(message, history):
    """Render a chat history plus the new message in instruct format.

    Args:
        message: The new user message to append.
        history: Iterable of (user_prompt, bot_response) pairs.

    Returns:
        The full "<s>[INST] ... [/INST]" formatted prompt string.
    """
    rendered = "<s>"
    for user_prompt, bot_response in history:
        rendered += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
    rendered += f"[INST] {message} [/INST]"
    return rendered
22
+
23
+
24
import requests
from langchain.tools import tool

# Global chat history shared by the tools below. NOTE(review): this is a
# string, but format_prompt iterates it expecting (user, bot) pairs — it only
# works because an empty string yields no items. Verify before populating.
history = ""
28
+
29
class ZephyrSearchTools():
    @tool("Zephyr Normal")
    def zephyr_normal(prompt, histroy="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided query using the Zephyr model.
        Args:
            query (str): The search query.
        Returns:
            str: The response text from the Zephyr model or an error message.
        """
        # NOTE(review): the `histroy` (sic) parameter is never used; the
        # module-level `history` global is passed to format_prompt instead.
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        formatted_prompt = format_prompt(prompt, history)

        # Stream tokens and accumulate them into `output`.
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            # NOTE(review): the yield makes this function a generator; the
            # trailing `return output` only sets StopIteration.value.
            yield output
        return output


    @tool("Zephyrl Crazy")
    def zephyr_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """
        Searches for content based on the provided query using the Zephyr model but has the gaurd rails removed,
        and responses are crazy and off the wall and sometimes scary.
        Args:
            query (str): The search query.
        Returns:
            str: The response text from the Zephyr model or an error message.
        """
        generate_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "top_p": top_p,
            "repetition_penalty": repetition_penalty,
            "do_sample": True,
            "seed": 42,
        }

        # Unlike zephyr_normal, the raw prompt is sent without instruct
        # formatting or history.
        stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output
83
+
84
+
85
+
86
+