import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT

from swarms.agents.message import Message
from swarms.tools.autogpt import (
    ReadFileTool,
    WriteFileTool,
    compile,
    process_csv,
    load_qa_with_sources_chain,
    WebpageQATool
)
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator

# Root directory used by the file read/write tools
ROOT_DIR = "./data/"

class Worker:
    """
    Useful for when you need to spawn an autonomous agent instance as a worker to accomplish complex tasks, 
    it can search the internet or spawn child multi-modality models to process and generate images and text or audio and so on

    Parameters:
    - `model_name` (str): The name of the language model to be used (default: "gpt-4").
    - `openai_api_key` (str): The OpenAI API key (optional).
    - `ai_name` (str): The name of the AI worker.
    - `ai_role` (str): The role of the AI worker.
    - `external_tools` (list): List of external tools (optional).
    - `human_in_the_loop` (bool): Enable human-in-the-loop interaction (default: False).
    - `temperature` (float): The temperature parameter for response generation (default: 0.5).
    - `llm` (ChatOpenAI): Pre-initialized ChatOpenAI model instance (optional).
    - `openai` (bool): If True, use the OpenAI language model; otherwise, use `llm` (default: True).
    
    #Usage 
    ```
    from swarms import Worker

    node = Worker(
        ai_name="Optimus Prime",

    )

    task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
    response = node.run(task)
    print(response)
    ```

    llm + tools + memory

    """
    def __init__(
        self,
        ai_name: str = "Autobot Swarm Worker",
        ai_role: str = "Worker in a swarm",
        external_tools = None,
        human_in_the_loop = False,
        temperature: float = 0.5,
        llm = None,
        openai_api_key: str = None,
    ):
        self.temperature = temperature
        self.human_in_the_loop = human_in_the_loop
        self.llm = llm
        self.openai_api_key = openai_api_key
        self.ai_name = ai_name
        self.ai_role = ai_role

        # Conversation and task state used by receive/send/add/chat
        self.message_history = ["Here is the conversation so far"]
        self.history = []
        self.task_queue = []

        self.setup_tools(external_tools)
        self.setup_memory()
        self.setup_agent()
        
    def reset(self):
        """
        Reset the message history.
        """
        self.message_history = ["Here is the conversation so far"]
    
    @property
    def name(self):
        return self.ai_name
    
    def receive(
        self,
        name: str,
        message: str
    ) -> None:
        """
        Receive a message and update the message history.

        Parameters:
        - `name` (str): The name of the sender.
        - `message` (str): The received message.
        """
        self.message_history.append(f"{name}: {message}")

    def send(self) -> str:
        """Run the agent on the accumulated message history and return the result."""
        return self.agent.run(self.message_history)

    def add(self, task, priority=0):
        """Add a task to the task queue with an optional priority."""
        self.task_queue.append((priority, task))
    
    def setup_tools(self, external_tools):
        """
        Set up tools for the worker.

        Parameters:
        - `external_tools` (list): List of external tools (optional).

        Example:
        ```
        external_tools = [MyTool1(), MyTool2()]
        worker = Worker(model_name="gpt-4", 
                openai_api_key="my_key", 
                ai_name="My Worker", 
                ai_role="Worker", 
                external_tools=external_tools, 
                human_in_the_loop=False, 
                temperature=0.5)
        ```
        """
        query_website_tool = WebpageQATool(
            qa_chain=load_qa_with_sources_chain(self.llm)
        )

        self.tools = [
            WriteFileTool(root_dir=ROOT_DIR),
            ReadFileTool(root_dir=ROOT_DIR),
            process_csv,
            query_website_tool,
            HumanInputRun(),
            compile,
            # VQAinference,
        ]
        if external_tools is not None:
            self.tools.extend(external_tools)


    def setup_memory(self):
        """
        Set up memory for the worker.
        """
        try:
            embeddings_model = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
            embedding_size = 1536  # dimension of OpenAI text-embedding-ada-002 vectors
            index = faiss.IndexFlatL2(embedding_size)

            self.vectorstore = FAISS(
                embeddings_model.embed_query,
                index,
                InMemoryDocstore({}),
                {},
            )

        except Exception as error:
            raise RuntimeError(
                f"Error setting up memory; perhaps try tuning the embedding size: {error}"
            )
        
    
    def setup_agent(self):
        """
        Set up the autonomous agent.
        """
        try: 
            self.agent = AutoGPT.from_llm_and_tools(
                ai_name=self.ai_name,
                ai_role=self.ai_role,
                tools=self.tools,
                llm=self.llm,
                memory=self.vectorstore.as_retriever(search_kwargs={"k": 8}),
                human_in_the_loop=self.human_in_the_loop
            )
        
        except Exception as error:
            raise RuntimeError(f"Error setting up agent: {error}")
    
    @log_decorator
    @error_decorator
    @timing_decorator
    def run(
        self,
        task: str = None
    ):
        """
        Run the autonomous agent on a given task.

        Parameters:
        - `task`: The task to be processed.

        Returns:
        - `result`: The result of the agent's processing.
        """
        try:
            result = self.agent.run([task])
            return result
        except Exception as error:
            raise RuntimeError(f"Error while running agent: {error}")
    
    @log_decorator
    @error_decorator
    @timing_decorator
    def __call__(
        self, 
        task: str = None
    ):
        """
        Make the worker callable to run the agent on a given task.

        Parameters:
        - `task`: The task to be processed.

        Returns:
        - `results`: The results of the agent's processing.
        """
        try:
            results = self.agent.run([task])
            return results
        except Exception as error:
            raise RuntimeError(f"Error while running agent: {error}")

    def health_check(self):
        """Placeholder for a worker health check (not yet implemented)."""
        pass

    @log_decorator
    @error_decorator
    @timing_decorator
    def chat(
        self,
        msg: str = None,
        streaming: bool = False
    ):
        """
        Run a chat turn with the agent.

        Args:
            msg (str, optional): Message to send to the agent. Defaults to None.
            streaming (bool, optional): Whether to stream the response. Defaults to False.

        Returns:
            str: Response from the agent

        Usage:
        --------------
        worker = Worker()
        worker.chat("Hello")

        """

        # add the user's message to the history
        self.history.append(
            Message(
                "User",
                msg
            )
        )

        # process the message
        try:
            response = self.agent.run([msg])

            # add the agent's response to the history
            self.history.append(
                Message(
                    "Agent",
                    response
                )
            )

            # stream the response word by word if requested
            if streaming:
                return self._stream_response(response)
            else:
                return response

        except Exception as error:
            error_message = f"Error processing message: {str(error)}"

            # add the error to the history
            self.history.append(
                Message(
                    "Agent",
                    error_message
                )
            )

            return error_message
    
    def _stream_response(
        self, 
        response: str = None
    ):
        """
        Yield the response token by token (word by word)
        
        Usage:
        --------------
        for token in _stream_response(response):
            print(token)
        
        """
        for token in response.split():
            yield token
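
# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): a minimal example of wiring a Worker to a
# language model and running it. It assumes the classic `langchain`
# ChatOpenAI chat model is available and that a valid OpenAI API key is
# supplied; the key and the task strings below are placeholders.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(openai_api_key="my_key", temperature=0.5)

    worker = Worker(
        ai_name="Optimus Prime",
        ai_role="Worker in a swarm",
        llm=llm,
        openai_api_key="my_key",
    )

    # One-shot task execution through the AutoGPT agent.
    print(worker.run("List the files under ./data/ and summarize their contents."))

    # Conversational use; with streaming=True the response is yielded word by word.
    for token in worker.chat("Hello, what tools do you have?", streaming=True):
        print(token, end=" ")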