umair894 committed on
Commit
64d121f
1 Parent(s): 3630175

Create app.py

Files changed (1)
  1. app.py +281 -0
app.py ADDED
@@ -0,0 +1,281 @@
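+ # BabyAGI-style agent demo: LangChain chains create, prioritize, and
+ # execute tasks, a FAISS vector store holds completed-task context, and
+ # Streamlit renders the run as a series of expandable messages.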
+ from collections import deque
+ from typing import Dict, List, Optional
+ from langchain import LLMChain, OpenAI, PromptTemplate
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.llms import BaseLLM
+ from langchain.vectorstores import FAISS
+ from langchain.vectorstores.base import VectorStore
+ from pydantic import BaseModel, Field
+ import streamlit as st
+
+ class TaskCreationChain(LLMChain):
+     """Chain to generate new tasks from the last result."""
+
+     @classmethod
+     def from_llm(cls, llm: BaseLLM, objective: str, verbose: bool = True) -> LLMChain:
+         """Build the task-creation chain for the given objective."""
+         task_creation_template = (
+             "You are a task creation AI that uses the result of an execution agent"
+             " to create new tasks with the following objective: {objective},"
+             " The last completed task has the result: {result}."
+             " This result was based on this task description: {task_description}."
+             " These are incomplete tasks: {incomplete_tasks}."
+             " Based on the result, create new tasks to be completed"
+             " by the AI system that do not overlap with incomplete tasks."
+             " Return the tasks as an array."
+         )
+         prompt = PromptTemplate(
+             template=task_creation_template,
+             partial_variables={"objective": objective},
+             input_variables=["result", "task_description", "incomplete_tasks"],
+         )
+         return cls(prompt=prompt, llm=llm, verbose=verbose)
+
+     def get_next_task(self, result: Dict, task_description: str, task_list: List[str]) -> List[Dict]:
+         """Parse the LLM response into a list of new task dicts."""
+         incomplete_tasks = ", ".join(task_list)
+         response = self.run(result=result, task_description=task_description, incomplete_tasks=incomplete_tasks)
+         new_tasks = response.split('\n')
+         return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
+
+
+ class TaskPrioritizationChain(LLMChain):
+     """Chain to prioritize tasks."""
+
+     @classmethod
+     def from_llm(cls, llm: BaseLLM, objective: str, verbose: bool = True) -> LLMChain:
+         """Build the task-prioritization chain for the given objective."""
+         task_prioritization_template = (
+             "You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing"
+             " the following tasks: {task_names}."
+             " Consider the ultimate objective of your team: {objective}."
+             " Do not remove any tasks. Return the result as a numbered list, like:"
+             " #. First task"
+             " #. Second task"
+             " Start the task list with number {next_task_id}."
+         )
+         prompt = PromptTemplate(
+             template=task_prioritization_template,
+             partial_variables={"objective": objective},
+             input_variables=["task_names", "next_task_id"],
+         )
+         return cls(prompt=prompt, llm=llm, verbose=verbose)
+
+     def prioritize_tasks(self, this_task_id: int, task_list: List[Dict]) -> List[Dict]:
+         """Prioritize tasks."""
+         task_names = [t["task_name"] for t in task_list]
+         next_task_id = int(this_task_id) + 1
+         response = self.run(task_names=task_names, next_task_id=next_task_id)
+         new_tasks = response.split('\n')
+         prioritized_task_list = []
+         for task_string in new_tasks:
+             if not task_string.strip():
+                 continue
+             task_parts = task_string.strip().split(".", 1)
+             if len(task_parts) == 2:
+                 task_id = task_parts[0].strip()
+                 task_name = task_parts[1].strip()
+                 prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
+         return prioritized_task_list
+
+
+ class ExecutionChain(LLMChain):
+     """Chain to execute tasks."""
+
+     vectorstore: VectorStore = Field(init=False)
+
+     @classmethod
+     def from_llm(cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = True) -> LLMChain:
+         """Build the execution chain backed by the given vector store."""
+         execution_template = (
+             "You are an AI who performs one task based on the following objective: {objective}."
+             " Take into account these previously completed tasks: {context}."
+             " Your task: {task}."
+             " Response:"
+         )
+         prompt = PromptTemplate(
+             template=execution_template,
+             input_variables=["objective", "context", "task"],
+         )
+         return cls(prompt=prompt, llm=llm, verbose=verbose, vectorstore=vectorstore)
+
+     def _get_top_tasks(self, query: str, k: int) -> List[str]:
+         """Get the top k tasks based on the query."""
+         results = self.vectorstore.similarity_search_with_score(query, k=k)
+         if not results:
+             return []
+         sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
+         return [str(item.metadata['task']) for item in sorted_results]
+
+     def execute_task(self, objective: str, task: str, k: int = 5) -> str:
+         """Execute a task."""
+         context = self._get_top_tasks(query=objective, k=k)
+         return self.run(objective=objective, context=context, task=task)
+
+
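+ # Message wraps a Streamlit expander plus a small icon column so each
+ # agent step (task list, next task, task result) renders as its own
+ # collapsible card.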
+ class Message:
+     exp: st.expander
+     ai_icon = "chatbot.gif"
+
+     def __init__(self, label: str):
+         message_area, icon_area = st.columns([10, 1])
+         icon_area.image(self.ai_icon, caption="Blazing Away")
+
+         # Expander
+         self.exp = message_area.expander(label=label, expanded=True)
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, ex_type, ex_value, trace):
+         pass
+
+     def write(self, content):
+         self.exp.markdown(content)
+
+
+ class BabyAGI(BaseModel):
+     """Controller model for the BabyAGI agent."""
+
+     objective: str = Field(alias="objective")
+     task_list: deque = Field(default_factory=deque)
+     task_creation_chain: TaskCreationChain = Field(...)
+     task_prioritization_chain: TaskPrioritizationChain = Field(...)
+     execution_chain: ExecutionChain = Field(...)
+     task_id_counter: int = Field(1)
+
+     def add_task(self, task: Dict):
+         self.task_list.append(task)
+
+     def print_task_list(self):
+         with Message(label="Toke-worthy Tasks") as m:
+             #m.write("### Task List")
+             for t in self.task_list:
+                 m.write("- " + str(t["task_id"]) + ": " + t["task_name"])
+             m.write("")
+
+     def print_next_task(self, task: Dict):
+         with Message(label="Rolling fresh High-deas") as m:
+             #m.write("### Next Task")
+             #m.write("- " + str(task["task_id"]) + ": " + task["task_name"])
+             m.write("- " + "Lighting up!" + ": " + task["task_name"])
+             m.write("")
+
+     def print_task_result(self, result: str):
+         with Message(label="“Pot-Propelled Progress”") as m:
+             #m.write("### Task Result")
+             m.write(result)
+             m.write("")
+
+     def print_task_ending(self):
+         with Message(label="Task Ended") as m:
+             m.write("### Task Ended")
+             m.write("")
+
+
+     def run(self, max_iterations: Optional[int] = None):
+         """Run the agent."""
+         num_iters = 0
+         while True:
+             if self.task_list:
+                 self.print_task_list()
+
+                 # Step 1: Pull the first task
+                 task = self.task_list.popleft()
+                 self.print_next_task(task)
+
+                 # Step 2: Execute the task
+                 result = self.execution_chain.execute_task(
+                     self.objective, task["task_name"]
+                 )
+                 this_task_id = int(task["task_id"])
+                 self.print_task_result(result)
+
+                 # Step 3: Store the result in the vector store
+                 result_id = f"result_{task['task_id']}"
+                 self.execution_chain.vectorstore.add_texts(
+                     texts=[result],
+                     metadatas=[{"task": task["task_name"]}],
+                     ids=[result_id],
+                 )
+
+                 # Step 4: Create new tasks and reprioritize the task list
+                 new_tasks = self.task_creation_chain.get_next_task(
+                     result, task["task_name"], [t["task_name"] for t in self.task_list]
+                 )
+                 for new_task in new_tasks:
+                     self.task_id_counter += 1
+                     new_task.update({"task_id": self.task_id_counter})
+                     self.add_task(new_task)
+                 self.task_list = deque(
+                     self.task_prioritization_chain.prioritize_tasks(
+                         this_task_id, list(self.task_list)
+                     )
+                 )
+             num_iters += 1
+             if max_iterations is not None and num_iters == max_iterations:
+                 self.print_task_ending()
+                 break
+
+     @classmethod
+     def from_llm_and_objectives(
+         cls,
+         llm: BaseLLM,
+         vectorstore: VectorStore,
+         objective: str,
+         first_task: str,
+         verbose: bool = False,
+     ) -> "BabyAGI":
+         """Initialize the BabyAGI Controller."""
+         task_creation_chain = TaskCreationChain.from_llm(
+             llm, objective, verbose=verbose
+         )
+         task_prioritization_chain = TaskPrioritizationChain.from_llm(
+             llm, objective, verbose=verbose
+         )
+         execution_chain = ExecutionChain.from_llm(llm, vectorstore, verbose=verbose)
+         controller = cls(
+             objective=objective,
+             task_creation_chain=task_creation_chain,
+             task_prioritization_chain=task_prioritization_chain,
+             execution_chain=execution_chain,
+         )
+         controller.add_task({"task_id": 1, "task_name": first_task})
+         return controller
+
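+ # Streamlit entry point: collect the OpenAI API key, objective, and first
+ # task from the UI, then run the agent for the chosen number of iterations.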
+ def main():
+     st.set_page_config(
+         initial_sidebar_state="expanded",
+         page_title="Greenlync Bot",
+         layout="centered",
+     )
+     st.markdown("""
+         <style>
+         footer {visibility: hidden;}
+         </style>""", unsafe_allow_html=True)
+
+     with st.sidebar:
+         openai_api_key = st.text_input('Your OpenAI API KEY', type="password")
+
+     st.title("AutoGPT")
+     objective = st.text_input("Enter your query:", "")
+     first_task = st.text_input("Reference /Research URL (if any)", "Default: Google")
+     max_iterations = st.number_input("High Limit Rotation", value=3, min_value=1, step=1)
+     button = st.button("Generate")
+
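+     # The FAISS index is seeded with a single placeholder text ("_") so the
+     # execution chain's similarity search has something to return before any
+     # task results have been stored.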
+     embedding_model = HuggingFaceEmbeddings()
+     vectorstore = FAISS.from_texts(["_"], embedding_model, metadatas=[{"task": first_task}])
+
+     if button:
+         try:
+             baby_agi = BabyAGI.from_llm_and_objectives(
+                 llm=OpenAI(openai_api_key=openai_api_key),
+                 vectorstore=vectorstore,
+                 objective=objective,
+                 first_task=first_task,
+                 verbose=False
+             )
+             baby_agi.run(max_iterations=max_iterations)
+         except Exception as e:
+             st.error(e)
+
+
+ if __name__ == "__main__":
+     main()