Shad0ws committed on
Commit
2b0805d
1 Parent(s): 2767c2f

Upload 13 files

__pycache__/constants.cpython-311.pyc ADDED
Binary file (337 Bytes).
 
app.py ADDED
@@ -0,0 +1,85 @@
+ import os
+ import streamlit as st
+ from constants import (
+     EMBEDDING_MODEL_NAME,
+     EMBEDDING_SIZE,
+     TODO_CHAIN_MODEL_NAME,
+     BABY_AGI_MODEL_NAME
+ )
+ from src.agent import run_agent
+
+ st.set_page_config(page_title='AI Agent with Google Search APIs', initial_sidebar_state="auto", menu_items=None)
+ st.title("AI Agent with Google Search APIs")
+
+ tab1, tab2 = st.tabs(["Agent Interface", "About the App"])
+
+ with tab1:
+
+     st.sidebar.title("Enter Your API Keys 🗝️")
+     open_api_key = st.sidebar.text_input(
+         "OpenAI API Key",
+         value=st.session_state.get('open_api_key', ''),
+         help="Get your API key from https://openai.com/",
+         type='password'
+     )
+     os.environ["OPENAI_API_KEY"] = open_api_key
+     serp_api_key = st.sidebar.text_input(
+         "Serp API Key",
+         value=st.session_state.get('serp_api_key', ''),
+         help="Get your API key from https://serpapi.com/",
+         type='password'
+     )
+     os.environ["SERPAPI_API_KEY"] = serp_api_key
+
+
+     st.session_state['open_api_key'] = open_api_key
+     st.session_state['serp_api_key'] = serp_api_key
+
+     with st.sidebar.expander('Advanced Settings ⚙️', expanded=False):
+         st.subheader('Advanced Settings ⚙️')
+         num_iterations = st.number_input(
+             label='Max Iterations',
+             value=5,
+             min_value=2,
+             max_value=20,
+             step=1
+         )
+         baby_agi_model = st.text_input('OpenAI Model', BABY_AGI_MODEL_NAME, help='See model options here: https://platform.openai.com/docs/models/overview')
+         todo_chaining_model = st.text_input('OpenAI TODO Model', TODO_CHAIN_MODEL_NAME, help='See model options here: https://platform.openai.com/docs/models/overview')
+         embedding_model = st.text_input('OpenAI Embedding Model', EMBEDDING_MODEL_NAME, help='See model options here: https://platform.openai.com/docs/guides/embeddings/what-are-embeddings')
+         # embedding_size = st.text_input('Embedding Model Size', EMBEDDING_SIZE, help='See model options here: https://platform.openai.com/docs/guides/embeddings/what-are-embeddings')
+
+
+     user_input = st.text_input(
+         "What do you want me to do?",
+         key="input"
+     )
+
+     if st.button('Run Agent'):
+         if user_input != "" and (open_api_key == '' or serp_api_key == ''):
+             st.error("Please enter your API keys in the sidebar")
+         elif user_input != "":
+             run_agent(
+                 user_input=user_input,
+                 num_iterations=num_iterations,
+                 baby_agi_model=baby_agi_model,
+                 todo_chaining_model=todo_chaining_model,
+                 embedding_model=embedding_model,
+                 # embedding_size=embedding_size
+             )
+
+             # Offer the results file for download via Streamlit's download_button()
+             st.download_button(
+                 label='Download Results',
+                 data=open('output.txt', 'rb').read(),
+                 file_name='output.txt',
+                 mime='text/plain'
+             )
+ with tab2:
+     st.markdown("## Demo Video")
+     st.video('https://youtu.be/mluNKqgBLaI')
+     st.markdown("## About the Application")
+     st.markdown("Staying organized and managing tasks efficiently is a constant challenge. AI Agent is an AI-driven task management system built with Python and powered by OpenAI. Backed by a vector store (FAISS in this implementation), it generates, prioritizes, and executes tasks toward a user-supplied objective.")
+     st.markdown("At its core, AI Agent runs in a loop, capped by the Max Iterations setting: it pulls the next task from the list, executes it, stores the result, and creates new tasks based on the objective and the outcome of the previous task. The workflow breaks down into four steps: Task Execution, Result Enrichment, Task Creation, and Task Prioritization.")
+     st.markdown("The codebase is small and easy to follow, so users can quickly understand how the system works, customize it to their needs, or integrate it into existing workflows.")
+     st.markdown("Beyond task management, AI Agent provides a starting point for building intelligent systems and automating processes on top of OpenAI models and a vector store.")
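
The four-step loop described in the About tab above (Task Execution, Result Enrichment, Task Creation, Task Prioritization) is implemented in src/baby_agi.py later in this commit. As a rough orientation, here is a simplified sketch of that control flow, stripped of the Streamlit and vector-store plumbing; the execute, create_tasks, and prioritize callables are placeholders standing in for the real chains, not part of the uploaded files:

from collections import deque

def simplified_baby_agi_loop(objective, execute, create_tasks, prioritize, max_iterations):
    # Seed the queue with a single bootstrap task, as BabyAGI._call does.
    tasks = deque([{"task_id": 1, "task_name": f"Make a todo list to accomplish: {objective}"}])
    results = []
    for _ in range(max_iterations):
        if not tasks:
            break
        task = tasks.popleft()                              # 1. Task execution
        result = execute(objective, task["task_name"])
        results.append(result)                              # 2. Result enrichment (kept as context)
        for new_task in create_tasks(objective, task, result, list(tasks)):
            tasks.append(new_task)                          # 3. Task creation
        tasks = deque(prioritize(objective, list(tasks)))   # 4. Task prioritization
    return results
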
constants.py ADDED
@@ -0,0 +1,4 @@
+ EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
+ EMBEDDING_SIZE = 1536
+ TODO_CHAIN_MODEL_NAME = "gpt-3.5-turbo"
+ BABY_AGI_MODEL_NAME = "gpt-3.5-turbo"
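
EMBEDDING_SIZE has to agree with the output dimensionality of EMBEDDING_MODEL_NAME (1536 for text-embedding-ada-002), because src/agent.py uses it to size the FAISS index. A quick consistency check, as a sketch that assumes OPENAI_API_KEY is already set:

from langchain.embeddings import OpenAIEmbeddings
from constants import EMBEDDING_MODEL_NAME, EMBEDDING_SIZE

# Embed a short probe string and compare its length to the configured index size.
probe = OpenAIEmbeddings(model=EMBEDDING_MODEL_NAME).embed_query("dimension probe")
assert len(probe) == EMBEDDING_SIZE, (len(probe), EMBEDDING_SIZE)
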
output.txt ADDED
@@ -0,0 +1,16 @@
+ TODO list for the objective "List 10 things that happened in AI in 2023 so far. Put it in a table with links to the sources":
+ 1. Research and gather information on the latest developments in AI in 2023.
+ 2. Create a table with 10 rows and 3 columns: "Event", "Date", and "Source".
+ 3. Begin filling in the table with the 10 most significant events in AI in 2023 so far.
+ 4. For each event, include the date it occurred and a link to a reputable source that reported on it.
+ 5. Double-check all sources to ensure they are reliable and up-to-date.
+ 6. Edit and format the table to make it easy to read and visually appealing.
+ 7. Review the table to ensure all events are accurately represented and all sources are properly cited.
+ 8. Share the table with relevant stakeholders, such as colleagues or clients, as needed.
+ 9. Update the table as new events occur throughout the year.
+ 10. Continuously monitor developments in AI to ensure the table remains relevant and up-to-date.
+
+ 1. OpenAI releases GPT-4, the most advanced language model to date. 2. Google announces major updates to its AI-powered search algorithm. 3. Amazon introduces new AI-powered features to its Alexa virtual assistant. 4. IBM launches a new AI-powered cybersecurity platform. 5. Tesla unveils a new self-driving car system. 6. Facebook faces criticism over its use of AI algorithms. 7. Microsoft acquires a leading AI startup. 8. AI-powered medical diagnosis tools show promising results. 9. Google's DeepMind achieves a major breakthrough in protein folding prediction. 10. The use of AI in hiring practices comes under scrutiny.
+
+ The plan includes researching existing methods and best practices, consulting with AI experts, developing a framework based on their recommendations, testing and refining the framework, and implementing and monitoring its effectiveness.
+
requirements.txt ADDED
@@ -0,0 +1,88 @@
+ aiohttp==3.8.4
+ aiosignal==1.3.1
+ altair==4.2.2
+ appnope==0.1.3
+ asttokens==2.2.1
+ async-timeout==4.0.2
+ attrs==23.1.0
+ backcall==0.2.0
+ blinker==1.6.2
+ cachetools==5.3.0
+ certifi==2022.12.7
+ charset-normalizer==3.1.0
+ click==8.1.3
+ dataclasses-json==0.5.7
+ decorator==5.1.1
+ entrypoints==0.4
+ executing==1.2.0
+ faiss-cpu==1.7.3
+ frozenlist==1.3.3
+ gitdb==4.0.10
+ GitPython==3.1.31
+ google-search-results==2.4.2
+ idna==3.4
+ importlib-metadata==6.6.0
+ ipdb==0.13.13
+ ipython==8.12.0
+ jedi==0.18.2
+ Jinja2==3.1.2
+ jsonschema==4.17.3
+ langchain==0.0.147
+ markdown-it-py==2.2.0
+ MarkupSafe==2.1.2
+ marshmallow==3.19.0
+ marshmallow-enum==1.5.1
+ matplotlib-inline==0.1.6
+ mdurl==0.1.2
+ multidict==6.0.4
+ mypy-extensions==1.0.0
+ numexpr==2.8.4
+ numpy==1.24.2
+ openai==0.27.4
+ openapi-schema-pydantic==1.2.4
+ packaging==23.1
+ pandas==1.5.3
+ parso==0.8.3
+ pexpect==4.8.0
+ pickleshare==0.7.5
+ Pillow==9.5.0
+ prompt-toolkit==3.0.38
+ protobuf==3.20.3
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ pyarrow==11.0.0
+ pydantic==1.10.7
+ pydeck==0.8.1b0
+ Pygments==2.15.1
+ Pympler==1.0.1
+ pyrsistent==0.19.3
+ python-dateutil==2.8.2
+ python-decouple==3.8
+ pytz==2023.3
+ pytz-deprecation-shim==0.1.0.post0
+ PyYAML==6.0
+ regex==2023.3.23
+ requests==2.28.2
+ rich==13.3.4
+ six==1.16.0
+ smmap==5.0.0
+ SQLAlchemy==1.4.47
+ stack-data==0.6.2
+ streamlit==1.21.0
+ tenacity==8.2.2
+ tiktoken==0.3.3
+ toml==0.10.2
+ tomli==2.0.1
+ toolz==0.12.0
+ tornado==6.3.1
+ tqdm==4.65.0
+ traitlets==5.9.0
+ typing-inspect==0.8.0
+ typing_extensions==4.5.0
+ tzdata==2023.3
+ tzlocal==4.3
+ urllib3==1.26.15
+ validators==0.20.0
+ wcwidth==0.2.6
+ yarl==1.9.1
+ zipp==3.15.0
src/__pycache__/agent.cpython-311.pyc ADDED
Binary file (3.33 kB).
 
src/__pycache__/baby_agi.cpython-311.pyc ADDED
Binary file (12.6 kB).
 
src/__pycache__/task_creation_chain.cpython-311.pyc ADDED
Binary file (1.66 kB).
 
src/__pycache__/task_prio_chain.cpython-311.pyc ADDED
Binary file (1.54 kB).
 
src/agent.py ADDED
@@ -0,0 +1,81 @@
+ from typing import Optional
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain import LLMChain, PromptTemplate
+ from langchain.vectorstores import FAISS
+ from langchain.docstore import InMemoryDocstore
+ from src.baby_agi import BabyAGI
+ from langchain.agents import ZeroShotAgent, Tool
+ from langchain import OpenAI, SerpAPIWrapper
+ from constants import (
+     EMBEDDING_MODEL_NAME,
+     EMBEDDING_SIZE,
+     TODO_CHAIN_MODEL_NAME,
+     BABY_AGI_MODEL_NAME
+ )
+
+
+ def run_agent(
+     user_input,
+     num_iterations,
+     baby_agi_model=BABY_AGI_MODEL_NAME,
+     todo_chaining_model=TODO_CHAIN_MODEL_NAME,
+     embedding_model=EMBEDDING_MODEL_NAME
+ ):
+
+     # Define the embedding model
+     embeddings_model = OpenAIEmbeddings(model=embedding_model)
+     # Initialize the vectorstore as empty
+     import faiss
+
+     embedding_size = EMBEDDING_SIZE
+     index = faiss.IndexFlatL2(embedding_size)
+     vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
+
+     todo_prompt = PromptTemplate.from_template(
+         "You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
+     )
+     todo_chain = LLMChain(
+         llm=OpenAI(temperature=0, model_name=todo_chaining_model),
+         prompt=todo_prompt
+     )
+     search = SerpAPIWrapper()
+     tools = [
+         Tool(
+             name="Search",
+             func=search.run,
+             description="useful for when you need to answer questions about current events",
+         ),
+         Tool(
+             name="TODO",
+             func=todo_chain.run,
+             description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
+         ),
+     ]
+
+     prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
+     suffix = """Question: {task}
+ {agent_scratchpad}"""
+
+     prompt = ZeroShotAgent.create_prompt(
+         tools,
+         prefix=prefix,
+         suffix=suffix,
+         input_variables=["objective", "task", "context", "agent_scratchpad"],
+     )
+
+     OBJECTIVE = user_input
+     llm = OpenAI(temperature=0, model_name=baby_agi_model)
+     # Logging of LLMChains
+     verbose = False
+     # If None, the loop keeps going forever; num_iterations caps the number of passes.
+     max_iterations: Optional[int] = num_iterations
+     baby_agi = BabyAGI.from_llm(
+         prompt=prompt,
+         tools=tools,
+         llm=llm,
+         vectorstore=vectorstore,
+         verbose=verbose,
+         max_iterations=max_iterations
+     )
+     if user_input:
+         baby_agi({"objective": OBJECTIVE})
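
run_agent can also be exercised without the Streamlit UI; a minimal sketch, assuming OPENAI_API_KEY and SERPAPI_API_KEY are already exported in the environment (src/baby_agi.py still imports streamlit, so its st.write calls simply run headless):

import os
from src.agent import run_agent

# Both keys must be set before run_agent is called; app.py normally does this
# from the sidebar inputs.
assert os.environ.get("OPENAI_API_KEY") and os.environ.get("SERPAPI_API_KEY")

run_agent(
    user_input="List 10 things that happened in AI in 2023 so far.",
    num_iterations=3,
)
# BabyAGI._call writes the collected results to output.txt when it finishes.
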
src/baby_agi.py ADDED
@@ -0,0 +1,221 @@
+ from langchain.vectorstores.base import VectorStore
+ from pydantic import BaseModel, Field
+ from langchain.chains.base import Chain
+ from collections import deque
+ from typing import Dict, List, Optional, Any
+ from langchain.agents import ZeroShotAgent, AgentExecutor
+ from src.task_creation_chain import TaskCreationChain
+ from src.task_prio_chain import TaskPrioritizationChain
+ import streamlit as st
+ from langchain import LLMChain
+ from langchain.llms import BaseLLM
+
+ # ----------------- helpers -----------------
+
+ def get_next_task(
+     task_creation_chain: LLMChain,
+     result: Dict,
+     task_description: str,
+     task_list: List[str],
+     objective: str,
+ ) -> List[Dict]:
+     """Create new tasks based on the last result."""
+     incomplete_tasks = ", ".join(task_list)
+     response = task_creation_chain.run(
+         result=result,
+         task_description=task_description,
+         incomplete_tasks=incomplete_tasks,
+         objective=objective,
+     )
+     new_tasks = response.split("\n")
+     return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
+
+ def prioritize_tasks(
+     task_prioritization_chain: LLMChain,
+     this_task_id: int,
+     task_list: List[Dict],
+     objective: str,
+ ) -> List[Dict]:
+     """Prioritize tasks."""
+     task_names = [t["task_name"] for t in task_list]
+     next_task_id = int(this_task_id) + 1
+     response = task_prioritization_chain.run(
+         task_names=task_names, next_task_id=next_task_id, objective=objective
+     )
+     new_tasks = response.split("\n")
+     prioritized_task_list = []
+     for task_string in new_tasks:
+         if not task_string.strip():
+             continue
+         task_parts = task_string.strip().split(".", 1)
+         if len(task_parts) == 2:
+             task_id = task_parts[0].strip()
+             task_name = task_parts[1].strip()
+             prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
+     return prioritized_task_list
+
+ def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
+     """Get the top k tasks based on the query."""
+     results = vectorstore.similarity_search_with_score(query, k=k)
+     if not results:
+         return []
+     sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
+     return [str(item.metadata["task"]) for item in sorted_results]
+
+
+ def execute_task(
+     vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
+ ) -> str:
+     """Execute a task."""
+     context = _get_top_tasks(vectorstore, query=objective, k=k)
+     return execution_chain.run(objective=objective, context=context, task=task)
+
+
+ # ----------------- class -----------------
+
+
+ class BabyAGI(Chain, BaseModel):
+     """Controller model for the BabyAGI agent."""
+
+     task_list: deque = Field(default_factory=deque)
+     task_creation_chain: TaskCreationChain = Field(...)
+     task_prioritization_chain: TaskPrioritizationChain = Field(...)
+     execution_chain: AgentExecutor = Field(...)
+     task_id_counter: int = Field(1)
+     vectorstore: VectorStore = Field(init=False)
+     max_iterations: Optional[int] = None
+
+     class Config:
+         """Configuration for this pydantic object."""
+
+         arbitrary_types_allowed = True
+
+     def add_task(self, task: Dict):
+         self.task_list.append(task)
+
+     def print_task_list(self):
+         print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
+         if len(self.task_list) > 1:
+             st.write('**Task List:** \n')
+         for t in self.task_list:
+             print(str(t["task_id"]) + ": " + t["task_name"])
+             if len(self.task_list) > 1:
+                 st.write(str(t["task_id"]) + ": " + t["task_name"])
+
+     def print_next_task(self, task: Dict):
+         print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
+         print(str(task["task_name"]))
+         return str(task["task_name"])
+
+     def print_task_result(self, result: str):
+         print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
+         return result
+
+     @property
+     def input_keys(self) -> List[str]:
+         return ["objective"]
+
+     @property
+     def output_keys(self) -> List[str]:
+         return []
+
+     def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+         """Run the agent."""
+         result_list = []
+         objective = inputs["objective"]
+         first_task = inputs.get("first_task", f"Make a todo list to accomplish the objective: {objective}")
+         self.add_task({"task_id": 1, "task_name": first_task})
+         num_iters = 0
+         while True:
+             if self.task_list:
+                 self.print_task_list()
+
+                 # Step 1: Pull the first task
+                 task = self.task_list.popleft()
+                 self.print_next_task(task)
+                 st.write('**Next Task:** \n')
+                 st.write(self.print_next_task(task))
+
+                 # Step 2: Execute the task
+                 result = execute_task(
+                     self.vectorstore, self.execution_chain, objective, task["task_name"]
+                 )
+                 this_task_id = int(task["task_id"])
+                 self.print_task_result(result)
+                 st.write('**Result from Task:** \n')
+                 st.write(self.print_task_result(result))
+                 result_list.append(result)
+
+                 # Step 3: Store the result in the vector store
+                 result_id = f"result_{task['task_id']}"
+                 self.vectorstore.add_texts(
+                     texts=[result],
+                     metadatas=[{"task": task["task_name"]}],
+                     ids=[result_id],
+                 )
+
+                 # Step 4: Create new tasks and reprioritize the task list
+                 new_tasks = get_next_task(
+                     self.task_creation_chain,
+                     result,
+                     task["task_name"],
+                     [t["task_name"] for t in self.task_list],
+                     objective,
+                 )
+                 for new_task in new_tasks:
+                     self.task_id_counter += 1
+                     new_task.update({"task_id": self.task_id_counter})
+                     self.add_task(new_task)
+                 self.task_list = deque(
+                     prioritize_tasks(
+                         self.task_prioritization_chain,
+                         this_task_id,
+                         list(self.task_list),
+                         objective,
+                     )
+                 )
+             num_iters += 1
+             if self.max_iterations is not None and num_iters == self.max_iterations:
+                 print(
+                     "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
+                 )
+                 st.success('Task Completed!', icon="✅")
+                 break
+
+         # Write the collected results to a text file for the download button in app.py
+         with open('output.txt', 'w') as f:
+             for item in result_list:
+                 f.write(item)
+                 f.write("\n\n")
+
+         return {}
+
+     @classmethod
+     def from_llm(
+         cls,
+         prompt: str,
+         tools: list,
+         llm: BaseLLM,
+         vectorstore: VectorStore,
+         verbose: bool = False,
+         **kwargs
+     ) -> "BabyAGI":
+         """Initialize the BabyAGI controller."""
+         task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
+         task_prioritization_chain = TaskPrioritizationChain.from_llm(
+             llm, verbose=verbose
+         )
+         llm_chain = LLMChain(llm=llm, prompt=prompt)
+         tool_names = [tool.name for tool in tools]
+         agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
+         agent_executor = AgentExecutor.from_agent_and_tools(
+             agent=agent, tools=tools, verbose=True
+         )
+         return cls(
+             task_creation_chain=task_creation_chain,
+             task_prioritization_chain=task_prioritization_chain,
+             execution_chain=agent_executor,
+             vectorstore=vectorstore,
+             **kwargs,
+         )
+
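
The two parsing helpers in this file (get_next_task and prioritize_tasks) assume newline-separated model output; a toy illustration of what they produce, with made-up response strings:

# get_next_task keeps each non-empty line verbatim as a task name.
creation_response = "Verify links\nBuild the table\n"
new_tasks = [{"task_name": name} for name in creation_response.split("\n") if name.strip()]
# -> [{'task_name': 'Verify links'}, {'task_name': 'Build the table'}]

# prioritize_tasks expects "<id>. <name>" per line and splits on the first dot.
prio_response = "2. Build the table\n3. Verify links"
prioritized = []
for line in prio_response.split("\n"):
    parts = line.strip().split(".", 1)
    if len(parts) == 2:
        prioritized.append({"task_id": parts[0].strip(), "task_name": parts[1].strip()})
# -> [{'task_id': '2', 'task_name': 'Build the table'}, {'task_id': '3', 'task_name': 'Verify links'}]
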
src/task_creation_chain.py ADDED
@@ -0,0 +1,30 @@
+ from langchain.llms import BaseLLM
+ from langchain import LLMChain, PromptTemplate
+
+
+ class TaskCreationChain(LLMChain):
+     """Chain to generate tasks."""
+
+     @classmethod
+     def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
+         """Build the task creation chain from an LLM."""
+         task_creation_template = (
+             "You are a task creation AI that uses the result of an execution agent"
+             " to create new tasks with the following objective: {objective}."
+             " The last completed task has the result: {result}."
+             " This result was based on this task description: {task_description}."
+             " These are incomplete tasks: {incomplete_tasks}."
+             " Based on the result, create new tasks to be completed"
+             " by the AI system that do not overlap with incomplete tasks."
+             " Return the tasks as an array."
+         )
+         prompt = PromptTemplate(
+             template=task_creation_template,
+             input_variables=[
+                 "result",
+                 "task_description",
+                 "incomplete_tasks",
+                 "objective",
+             ],
+         )
+         return cls(prompt=prompt, llm=llm, verbose=verbose)
src/task_prio_chain.py ADDED
@@ -0,0 +1,23 @@
+ from langchain.llms import BaseLLM
+ from langchain import LLMChain, PromptTemplate
+
+ class TaskPrioritizationChain(LLMChain):
+     """Chain to prioritize tasks."""
+
+     @classmethod
+     def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
+         """Build the task prioritization chain from an LLM."""
+         task_prioritization_template = (
+             "You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing"
+             " the following tasks: {task_names}."
+             " Consider the ultimate objective of your team: {objective}."
+             " Do not remove any tasks. Return the result as a numbered list, like:"
+             " #. First task"
+             " #. Second task"
+             " Start the task list with number {next_task_id}."
+         )
+         prompt = PromptTemplate(
+             template=task_prioritization_template,
+             input_variables=["task_names", "next_task_id", "objective"],
+         )
+         return cls(prompt=prompt, llm=llm, verbose=verbose)
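
To see the filled-in prompt this chain sends to the model, it can be rendered with PromptTemplate.format. A sketch with made-up values; constructing the OpenAI wrapper assumes OPENAI_API_KEY is set:

from langchain import OpenAI
from src.task_prio_chain import TaskPrioritizationChain

chain = TaskPrioritizationChain.from_llm(OpenAI(temperature=0), verbose=False)
print(chain.prompt.format(
    task_names=["Verify links", "Build the table"],
    next_task_id=2,
    objective="List 10 things that happened in AI in 2023 so far.",
))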