eaglelandsonce
committed on
Commit
•
2098831
1
Parent(s):
dfb41c0
Update crewai/agent.py
Browse files- crewai/agent.py +193 -0
crewai/agent.py
CHANGED
@@ -1,6 +1,197 @@
|
|
1 |
import uuid
|
2 |
from typing import Any, List, Optional
|
3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
from langchain.prompts.chat import (
|
5 |
ChatPromptTemplate,
|
6 |
HumanMessagePromptTemplate,
|
@@ -194,3 +385,5 @@ class Agent(BaseModel):
|
|
194 |
@staticmethod
|
195 |
def __tools_names(tools) -> str:
|
196 |
return ", ".join([t.name for t in tools])
|
|
|
|
|
|
1 |
import uuid
|
2 |
from typing import Any, List, Optional
|
3 |
|
4 |
+
from langchain.agents.format_scratchpad import format_log_to_str
|
5 |
+
from langchain.memory import ConversationSummaryMemory
|
6 |
+
from langchain.tools.render import render_text_description
|
7 |
+
from langchain_core.runnables.config import RunnableConfig
|
8 |
+
from langchain_openai import ChatOpenAI
|
9 |
+
from pydantic import (
|
10 |
+
UUID4,
|
11 |
+
BaseModel,
|
12 |
+
ConfigDict,
|
13 |
+
Field,
|
14 |
+
InstanceOf,
|
15 |
+
field_validator,
|
16 |
+
model_validator,
|
17 |
+
)
|
18 |
+
from pydantic_core import PydanticCustomError
|
19 |
+
|
20 |
+
from crewai.agents import (
|
21 |
+
CacheHandler,
|
22 |
+
CrewAgentExecutor,
|
23 |
+
CrewAgentOutputParser,
|
24 |
+
ToolsHandler,
|
25 |
+
)
|
26 |
+
from crewai.prompts import Prompts
|
27 |
+
|
28 |
+
|
29 |
+
class Agent(BaseModel):
    """Represents an agent in a system.

    Each agent has a role, a goal, a backstory, and an optional language model (llm).
    The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.

    Attributes:
        agent_executor: An instance of the CrewAgentExecutor class.
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        llm: The language model that will run the agent.
        max_iter: Maximum number of iterations for an agent to execute a task.
        memory: Whether the agent should have memory or not.
        verbose: Whether the agent execution should be in verbose mode.
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
    """

    __hash__ = object.__hash__
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    llm: Optional[Any] = Field(
        default_factory=lambda: ChatOpenAI(
            temperature=0.7,
            model_name="gpt-4",
        ),
        description="Language model that will run the agent.",
    )
    memory: bool = Field(
        default=True, description="Whether the agent should have memory or not"
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: List[Any] = Field(
        default_factory=list, description="Tools at agents disposal"
    )
    max_iter: Optional[int] = Field(
        default=15, description="Maximum iterations for an agent to execute a task"
    )
    agent_executor: Optional[InstanceOf[CrewAgentExecutor]] = Field(
        default=None, description="An instance of the CrewAgentExecutor class."
    )
    tools_handler: Optional[InstanceOf[ToolsHandler]] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )
    # default_factory (not default=CacheHandler()) so each Agent gets its own
    # handler instead of one shared instance created at class-definition time.
    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
        default_factory=CacheHandler,
        description="An instance of the CacheHandler class.",
    )

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        """Reject any user-supplied id; it must come from the default factory."""
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def check_agent_executor(self) -> "Agent":
        """Lazily build the agent executor after model validation if absent."""
        if not self.agent_executor:
            self.set_cache_handler(self.cache_handler)
        return self

    def execute_task(
        self,
        task: str,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute a task with the agent.

        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.

        Returns:
            Output of the agent
        """
        if context:
            task = "\n".join(
                [task, "\nThis is the context you are working with:", context]
            )

        tools = tools or self.tools
        self.agent_executor.tools = tools

        return self.agent_executor.invoke(
            {
                "input": task,
                "tool_names": self.__tools_names(tools),
                "tools": render_text_description(tools),
            },
            RunnableConfig(callbacks=[self.tools_handler]),
        )["output"]

    def set_cache_handler(self, cache_handler) -> None:
        """Install a cache handler and rebuild the executor around it."""
        self.cache_handler = cache_handler
        self.tools_handler = ToolsHandler(cache=self.cache_handler)
        self.__create_agent_executor()

    def __create_agent_executor(self) -> CrewAgentExecutor:
        """Create an agent executor for the agent.

        Returns:
            An instance of the CrewAgentExecutor class.
        """
        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        executor_args = {
            "tools": self.tools,
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
        }

        # Memory-enabled agents use a summary memory and a prompt that
        # includes chat history; otherwise use the plain execution prompt.
        if self.memory:
            summary_memory = ConversationSummaryMemory(
                llm=self.llm, memory_key="chat_history", input_key="input"
            )
            executor_args["memory"] = summary_memory
            agent_args["chat_history"] = lambda x: x["chat_history"]
            prompt = Prompts().task_execution_with_memory()
        else:
            prompt = Prompts().task_execution()

        execution_prompt = prompt.partial(
            goal=self.goal,
            role=self.role,
            backstory=self.backstory,
        )

        # Stop generation at "\nObservation" so tool output is injected by
        # the executor rather than hallucinated by the model.
        bind = self.llm.bind(stop=["\nObservation"])
        inner_agent = (
            agent_args
            | execution_prompt
            | bind
            | CrewAgentOutputParser(
                tools_handler=self.tools_handler, cache=self.cache_handler
            )
        )
        self.agent_executor = CrewAgentExecutor(agent=inner_agent, **executor_args)

    @staticmethod
    def __tools_names(tools) -> str:
        """Render tool names as a comma-separated string for the prompt."""
        return ", ".join([t.name for t in tools])
|
187 |
+
|
188 |
+
|
189 |
+
|
190 |
+
'''
|
191 |
+
|
192 |
+
import uuid
|
193 |
+
from typing import Any, List, Optional
|
194 |
+
|
195 |
from langchain.prompts.chat import (
|
196 |
ChatPromptTemplate,
|
197 |
HumanMessagePromptTemplate,
|
|
|
385 |
@staticmethod
|
386 |
def __tools_names(tools) -> str:
|
387 |
return ", ".join([t.name for t in tools])
|
388 |
+
|
389 |
+
'''
|