omkarenator committed
Commit
708d898
1 Parent(s): ce9f871

initial commit

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,28 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+ .DS_Store
README.md CHANGED
@@ -1,2 +1,13 @@
- # AutoAgents
- AutoAgents!
+ ---
+ title: Search Llm
+ emoji: 🐢
+ colorFrom: green
+ colorTo: purple
+ sdk: streamlit
+ sdk_version: 1.21.0
+ python_version: 3.10.11
+ app_file: autoagents/spaces/app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
autoagents/__init__.py ADDED
File without changes
autoagents/agents/__init__.py ADDED
File without changes
autoagents/agents/search.py ADDED
@@ -0,0 +1,259 @@
+ from typing import List, Union, Any, Optional, Dict
+ import uuid
+ import re
+ from datetime import date
+ import asyncio
+ from collections import defaultdict
+
+ from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
+ from langchain.prompts import StringPromptTemplate
+ from langchain import LLMChain
+ from langchain.chat_models import ChatOpenAI
+ from langchain.schema import AgentAction, AgentFinish
+ from langchain.callbacks import get_openai_callback
+ from langchain.callbacks.base import AsyncCallbackHandler
+ from langchain.callbacks.manager import AsyncCallbackManager
+
+
+ from autoagents.tools.tools import search_tool, note_tool, rewrite_search_query
+ from autoagents.utils.logger import InteractionsLogger
+
+
+ # Set up the base template
+ template = """
+ We are working together to satisfy the user's original goal step-by-step. Play to your strengths as an LLM.
+ Make sure the plan is achievable using the
+ available tools. You SHOULD directly produce a `Final Answer:` when you
+ think you have good-enough information to achieve the Goal. The final answer should be descriptive, encompassing all relevant details.
+ Today is {today}.
+
+ ## Goal:
+ {input}
+
+ If you require assistance or additional information, you should use *only* one of the following tools:
+ {tools}.
+
+ ## Output format
+ You MUST produce Output in the following format:
+
+ Thought: you should always think about what to do when you think you have not achieved the Goal.
+ Reasoning: reasoning
+ Plan:
+ - short bulleted
+ - list that conveys
+ - next-step plan
+ Action: the action to take, should be ONE OF {tool_names}
+ Action Input: the input to the Action
+ Observation: the result of the Action
+ ... (this Thought/Reasoning/Plan/Action/Action Input/Observation can repeat N
+ times until there is a Final Answer)
+ Final Answer: the final answer to achieve the original Goal which can be the
+ only output or when you have no Action to do next.
+
+ ## History
+ {agent_scratchpad}
+
+ Do not repeat any past actions in History, because you will not get additional information.
+ If the last action is Search, then you should use Notepad to keep critical information.
+ If you have gathered all the information in your plan to satisfy the user's original goal, then respond immediately with the Final Answer.
+ """
+
+
+ # Set up a prompt template
+ class CustomPromptTemplate(StringPromptTemplate):
+     # The template to use
+     template: str
+     # The list of tools available
+     tools: List[Tool]
+     ialogger: InteractionsLogger
+
+     def format(self, **kwargs) -> str:
+         # Get the intermediate steps (AgentAction, Observation tuples)
+         # Format them in a particular way
+         intermediate_steps = kwargs.pop("intermediate_steps")
+         outputs = ""
+         # Set the agent_scratchpad variable to that value
+         for action, observation in intermediate_steps[:-1]:
+             outputs += f"{action.log}\n"
+         if len(intermediate_steps) > 0:
+             action, observation = intermediate_steps[-1]
+             self.ialogger.add_system({"action": action, "observation": observation})
+             if action.tool not in ("Search", "Notepad"):
+                 raise Exception("Invalid tool requested by the model.")
+             if action.tool == "Notepad":
+                 outputs += f"{action.log}\n"
+                 outputs += f"Observation: {observation}\n"
+             elif action.tool == "Search":
+                 current = "".join([f"{d}" for d in observation])
+                 outputs += f"{action.log}\n"
+                 outputs += f"Observation: {current}\n"
+
+             # Parse the output of the last step for the reasoning and plan
+             regex = r"Thought\s*\d*\s*:(.*?)\n(.*)"
+             match = re.search(regex, action.log, re.DOTALL)
+             thoughts = match.group(1).strip() if match else ""
+
+             regex = r"Reasoning\s*\d*\s*:(.*?)\n(.*)"
+             match = re.search(regex, action.log, re.DOTALL)
+             reasoning = match.group(1).strip() if match else ""
+
+             regex = r"Plan\s*\d*\s*:(.*?)\nAction(.*)"
+             match = re.search(regex, action.log, re.DOTALL)
+             plans = match.group(1).strip() if match else ""
+             self.ialogger.add_structured_data({"output": {"thoughts": thoughts,
+                                                           "reasoning": reasoning,
+                                                           "plans": plans,
+                                                           "action": action.tool,
+                                                           "action_input": action.tool_input,
+                                                           "raw_output": action.log},
+                                                "observation": observation})
+         kwargs["agent_scratchpad"] = outputs
+         # Create a tools variable from the list of tools provided
+         kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
+         # Create a list of tool names for the tools provided
+         kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
+         kwargs["today"] = date.today()
+         final_prompt = self.template.format(**kwargs)
+         if not intermediate_steps:
+             # first iteration
+             self.ialogger.add_system({"prompt": final_prompt})
+         return final_prompt
+
+
+ class CustomOutputParser(AgentOutputParser):
+     class Config:
+         arbitrary_types_allowed = True
+     ialogger: InteractionsLogger
+     api_key: str
+     new_action_input: Optional[str]
+
+     action_history = defaultdict(set)
+
+     def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
+         self.ialogger.add_ai(llm_output)
+         # Check if agent should finish
+         if "Final Answer:" in llm_output:
+             final_answer = llm_output.split("Final Answer:")[-1].strip()
+             self.ialogger.add_structured_data({"output": {"action": "Final Answer",
+                                                           "action_input": final_answer,
+                                                           "raw_output": llm_output}})
+             return AgentFinish(
+                 # Return values is generally always a dictionary with a single `output` key
+                 # It is not recommended to try anything else at the moment :)
+                 return_values={"output": final_answer},
+                 log=llm_output,
+             )
+         # Parse out the action and action input
+         regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
+         match = re.search(regex, llm_output, re.DOTALL)
+         if not match:
+             raise ValueError(f"Could not parse LLM output: `{llm_output}`")
+         action = match.group(1).strip()
+         action_input = match.group(2).strip().strip('"')
+
+         if action_input in self.action_history[action]:
+             new_action_input = rewrite_search_query(action_input,
+                                                     self.action_history[action],
+                                                     self.api_key)
+             self.new_action_input = new_action_input
+             self.action_history[action].add(new_action_input)
+             return AgentAction(tool=action, tool_input=new_action_input, log=llm_output)
+         else:
+             # Return the action and action input
+             self.action_history[action].add(action_input)
+             return AgentAction(tool=action, tool_input=action_input, log=llm_output)
+
+
+ class ActionRunner:
+     def __init__(self, outputq, api_key: str, model_name: str, persist_logs=False):
+         self.ialogger = InteractionsLogger(name=f"{uuid.uuid4().hex[:6]}", persist=persist_logs)
+         tools = [search_tool, note_tool]
+         prompt = CustomPromptTemplate(
+             template=template,
+             tools=tools,
+             input_variables=["input", "intermediate_steps"],
+             ialogger=self.ialogger)
+
+         output_parser = CustomOutputParser(ialogger=self.ialogger, api_key=api_key)
+
+         class MyCustomHandler(AsyncCallbackHandler):
+             def __init__(self):
+                 pass
+
+             async def on_chain_end(self, outputs, **kwargs) -> None:
+                 if "text" in outputs:
+                     await outputq.put(outputs["text"])
+
+             async def on_agent_action(
+                 self,
+                 action: AgentAction,
+                 *,
+                 run_id: uuid.UUID,
+                 parent_run_id: Optional[uuid.UUID] = None,
+                 **kwargs: Any,
+             ) -> None:
+                 if (new_action_input := output_parser.new_action_input):
+                     # Notify users
+                     await outputq.put(RuntimeWarning(f"Action Input Rewritten: {new_action_input}"))
+                     output_parser.new_action_input = None
+
+             async def on_tool_start(
+                 self,
+                 serialized: Dict[str, Any],
+                 input_str: str,
+                 *,
+                 run_id: uuid.UUID,
+                 parent_run_id: Optional[uuid.UUID] = None,
+                 **kwargs: Any,
+             ) -> None:
+                 pass
+
+             async def on_tool_end(
+                 self,
+                 output: str,
+                 *,
+                 run_id: uuid.UUID,
+                 parent_run_id: Optional[uuid.UUID] = None,
+                 **kwargs: Any,
+             ) -> None:
+                 await outputq.put(output)
+
+         handler = MyCustomHandler()
+
+         llm = ChatOpenAI(openai_api_key=api_key, temperature=0, model_name=model_name)
+         llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])
+         tool_names = [tool.name for tool in tools]
+         for tool in tools:
+             tool.callbacks = [handler]
+
+         agent = LLMSingleActionAgent(
+             llm_chain=llm_chain,
+             output_parser=output_parser,
+             stop=["\nObservation:"],
+             allowed_tools=tool_names
+         )
+         callback_manager = AsyncCallbackManager([handler])
+
+         # Finally create the Executor
+         self.agent_executor = AgentExecutor.from_agent_and_tools(agent=agent,
+                                                                  tools=tools,
+                                                                  verbose=False,
+                                                                  callback_manager=callback_manager)
+
+     async def run(self, goal: str, outputq):
+         self.ialogger.set_goal(goal)
+         try:
+             with get_openai_callback() as cb:
+                 output = await self.agent_executor.arun(goal)
+                 self.ialogger.add_cost({"total_tokens": cb.total_tokens,
+                                         "prompt_tokens": cb.prompt_tokens,
+                                         "completion_tokens": cb.completion_tokens,
+                                         "total_cost": cb.total_cost,
+                                         "successful_requests": cb.successful_requests})
+             self.ialogger.save()
+         except Exception as e:
+             self.ialogger.add_message({"error": str(e)})
+             self.ialogger.save()
+             await outputq.put(Exception("Something went wrong. Please try searching again."))
+             return
+         return output
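For reference, a minimal driver sketch of how `ActionRunner` is consumed: intermediate tool outputs and errors arrive on an `asyncio.Queue`, and the loop stops at the final answer. This mirrors `test.py` and `autoagents/spaces/app.py` below; the goal string is a made-up example and a valid `OPENAI_API_KEY` in the environment is assumed.

```python
import asyncio
import os

from autoagents.agents.search import ActionRunner

async def demo():
    # Results, tool observations, and errors are all pushed onto this queue.
    outputq = asyncio.Queue()
    runner = ActionRunner(outputq, api_key=os.environ["OPENAI_API_KEY"],
                          model_name="gpt-3.5-turbo")
    task = asyncio.create_task(runner.run("Who won the most recent NBA MVP award?", outputq))
    while True:
        chunk = await outputq.get()
        if isinstance(chunk, Exception):   # ActionRunner.run puts an Exception on failure
            print(chunk)
            break
        print(chunk)
        if "Final Answer:" in str(chunk):  # the agent is done
            break
    await task

asyncio.run(demo())
```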
autoagents/spaces/app.py ADDED
@@ -0,0 +1,139 @@
+ import os
+ import asyncio
+ import random
+ from ast import literal_eval
+
+ import streamlit as st
+ import openai
+
+ from autoagents.utils.constants import MAIN_HEADER, MAIN_CAPTION, SAMPLE_QUESTIONS
+ from autoagents.agents.search import ActionRunner
+
+
+ async def run():
+     output_acc = ""
+     st.session_state["random"] = random.randint(0, 99)
+     if "task" not in st.session_state:
+         st.session_state.task = None
+     if "model_name" not in st.session_state:
+         st.session_state.model_name = "gpt-3.5-turbo"
+
+     st.set_page_config(
+         page_title="Search Agent",
+         page_icon="🤖",
+         layout="wide",
+         initial_sidebar_state="expanded",
+     )
+
+     st.title(MAIN_HEADER)
+     st.caption(MAIN_CAPTION)
+
+     with st.form("my_form", clear_on_submit=False):
+         st.markdown("<style> .inter { white-space: pre-line; } </style>", unsafe_allow_html=True)
+         user_input = st.text_input(
+             "You: ",
+             key="input",
+             placeholder="Ask me anything ...",
+             label_visibility="hidden",
+         )
+
+         submitted = st.form_submit_button(
+             "Search", help="Hit to submit the search query."
+         )
+
+     # Ask the user to enter their OpenAI API key
+     API_O = st.sidebar.text_input("OpenAI api-key", type="password") or os.getenv(
+         "OPENAI_API_KEY"
+     )
+     with st.sidebar:
+         model_dict = {
+             "gpt-3.5-turbo": "GPT-3.5-turbo",
+             "gpt-4": "GPT-4 (Recommended for better quality results)",
+         }
+         st.radio(
+             "OpenAI model",
+             model_dict.keys(),
+             key="model_name",
+             format_func=lambda x: model_dict[x],
+         )
+
+         st.markdown("**Example Queries:**")
+         for q in SAMPLE_QUESTIONS:
+             st.markdown(f"*{q}*")
+
+     if not API_O:
+         st.warning(
+             "API key required to try this app. The API key is not stored in any form. [This](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) might help."
+         )
+     else:
+         outputq = asyncio.Queue()
+         runner = ActionRunner(
+             outputq,
+             api_key=API_O,
+             model_name=st.session_state.model_name,
+             persist_logs=True,
+         )  # log to HF-dataset
+
+         async def cleanup(log: str):
+             st.error(log)
+             await st.session_state.task
+             st.session_state.task = None
+             st.stop()
+
+         placeholder = st.empty()
+
+         if user_input and submitted:
+             if st.session_state.task is not None:
+                 with placeholder.container():
+                     st.session_state.task.cancel()
+                     st.warning("Previous search aborted", icon="⚠️")
+
+             st.session_state.task = asyncio.create_task(
+                 runner.run(user_input, outputq)
+             )
+             iterations = 0
+             with st.expander("Search Results", expanded=True):
+                 while True:
+                     with st.spinner("Wait for it..."):
+                         output = await outputq.get()
+                         placeholder.empty()
+                     if isinstance(output, Exception):
+                         if isinstance(output, openai.error.AuthenticationError):
+                             await cleanup(
+                                 f"AuthenticationError: {output.json_body}"
+                             )
+                         elif isinstance(output, RuntimeWarning):
+                             st.warning(output)
+                             continue
+                         else:
+                             await cleanup(str(output))
+                         return
+                     try:
+                         output_fmt = literal_eval(output)
+                         st.json(output_fmt, expanded=False)
+                         st.write("---")
+                         iterations += 1
+                     except Exception:
+                         output_acc += "\n" + output
+                         st.markdown(f"<div class=\"inter\"> {output} </div>",
+                                     unsafe_allow_html=True)
+                     if iterations >= runner.agent_executor.max_iterations:
+                         await cleanup(
+                             f"Maximum iterations ({iterations}) exceeded. You can try running the search again or try a variation of the query."
+                         )
+                         return
+                     if "Final Answer:" in output:
+                         break
+             # Found the answer
+             final_answer = await st.session_state.task
+             final_answer = final_answer.replace("$", "\\$")
+             # st.success accepts md
+             st.success(final_answer, icon="✅")
+             st.balloons()
+             st.session_state.task = None
+             st.stop()
+
+ if __name__ == "__main__":
+     loop = asyncio.new_event_loop()
+     loop.set_debug(enabled=False)
+     loop.run_until_complete(run())
autoagents/tools/__init__.py ADDED
File without changes
autoagents/tools/tools.py ADDED
@@ -0,0 +1,64 @@
+ from langchain import PromptTemplate, OpenAI, LLMChain
+ from langchain.agents import Tool
+ from duckpy import Client
+ from langchain.chat_models import ChatOpenAI
+
+
+ MAX_SEARCH_RESULTS = 20  # Number of search results to observe at a time
+
+ search_description = """ Useful for when you need to ask with search. Use direct language and be
+ EXPLICIT in what you want to search.
+
+ ## Examples of incorrect use
+ 1. Action: Search
+    Action Input: "[name of bagel shop] menu"
+
+ The Action Input cannot be None or empty.
+ """
+
+ notepad_description = """ Useful for when you need to note-down specific
+ information for later reference. Please provide full information you want to
+ note-down in the Action Input and all future prompts will remember it.
+ This is the mandatory tool after using the search tool.
+ Using Notepad does not always lead to a final answer.
+
+ ## Examples of using the Notepad tool
+ Action: Notepad
+ Action Input: the information you want to note-down
+ """
+
+ async def ddg(query: str):
+     if query is None or query.lower().strip().strip('"') == "none" or query.lower().strip().strip('"') == "null":
+         x = "The action input field is empty. Please provide a search query."
+         return [x]
+     else:
+         client = Client()
+         return client.search(query)[:MAX_SEARCH_RESULTS]
+
+
+ async def notepad(x: str) -> str:
+     return f"{[x]}"
+
+
+ search_tool = Tool(name="Search",
+                    func=lambda x: x,
+                    coroutine=ddg,
+                    description=search_description)
+
+ note_tool = Tool(name="Notepad",
+                  func=lambda x: x,
+                  coroutine=notepad,
+                  description=notepad_description)
+
+
+ def rewrite_search_query(q: str, search_history, api_key: str) -> str:
+     history_string = '\n'.join(search_history)
+     template = """We are using the Search tool.
+     # Previous queries:
+     {history_string}. \n\n Rewrite query {action_input} to be
+     different from the previous ones."""
+     llm = ChatOpenAI(temperature=0, openai_api_key=api_key)
+     prompt = PromptTemplate(template=template,
+                             input_variables=["action_input", "history_string"])
+     llm_chain = LLMChain(prompt=prompt, llm=llm)
+     return llm_chain.predict(action_input=q, history_string=history_string)
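As a usage sketch, `rewrite_search_query` is what `CustomOutputParser` in `autoagents/agents/search.py` falls back to when the model repeats an earlier query. A hypothetical call (the query history and API key below are placeholders, not values from this repo) might look like:

```python
from autoagents.tools.tools import rewrite_search_query

# Hypothetical history and placeholder key, for illustration only.
previous_queries = {"nba mvp 2023", "nba mvp 2023 stats"}
new_query = rewrite_search_query("nba mvp 2023", previous_queries, api_key="sk-...")
print(new_query)  # expected: a rephrased query that differs from the history
```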
autoagents/utils/__init__.py ADDED
File without changes
autoagents/utils/constants.py ADDED
@@ -0,0 +1,13 @@
+ MAIN_HEADER = "Web Search Agent"
+
+ MAIN_CAPTION = """ This is a proof-of-concept search engine built on ReAct-style
+ prompting, which acts as a search agent that plans and executes web searches on
+ your behalf. Given a high-level search query, the agent tries to come up with a
+ concluding answer based on multiple rounds of searches. You can observe all
+ the intermediate interactions with the search engine below."""
+
+ SAMPLE_QUESTIONS = [
+     "What has David Sacks written about SaaS? Can you provide some links?",
+     "Where is the All-In Summit 2023 being held and how much are the tickets?",
+     "Did Stan Druckenmiller buy Nvidia recently?",
+ ]
autoagents/utils/logger.py ADDED
@@ -0,0 +1,56 @@
+ import os
+ import json
+ from typing import Dict, Any
+ import uuid
+
+ import huggingface_hub
+ from huggingface_hub import Repository
+
+
+ class InteractionsLogger:
+     def __init__(self, name: str, persist=False):
+         self.persist = persist
+         self.counter = 0
+         self.name = name  # unique id
+         HF_TOKEN = os.environ.get("HF_TOKEN")
+         HF_DATASET_REPO_URL = os.environ.get("HF_DATASET_REPO_URL")
+         if (HF_TOKEN is not None) and (HF_DATASET_REPO_URL is not None):
+             self.repo = Repository(
+                 local_dir="data", clone_from=HF_DATASET_REPO_URL, use_auth_token=HF_TOKEN
+             )
+         else:
+             self.persist = False
+
+     def set_goal(self, goal: str):
+         # Initialize two variables for saving two files (self.messages for
+         # training and self.structured_data for later use)
+         self.messages = [{"goal": goal}]
+         self.structured_data = {"goal": goal}
+
+     def add_system(self, more: Dict):
+         self.convos = [{"from": "system"} | more]
+
+     def add_ai(self, msg: str):
+         self.convos.append({"from": "ai", "value": msg})
+         self.messages.append({"id": f"{self.name}_{self.counter}", "conversations": self.convos})
+         self.counter += 1
+
+     def add_structured_data(self, data: Dict[str, Any]):
+         self.structured_data.update({f"turn_{self.counter}": data})
+
+     def add_message(self, data: Dict[str, Any]):
+         self.structured_data.update(data)
+
+     def save(self):
+         if self.persist:
+             # TODO: want to add retry in a loop?
+             self.repo.git_pull()
+             fname = uuid.uuid4().hex[:16]
+             with open(f"./data/{fname}.json", "w") as f:
+                 json.dump(self.messages, f, indent=2)
+             with open(f"./data/{fname}.clean.json", "w") as f:
+                 json.dump(self.structured_data, f, indent=2)
+             commit_url = self.repo.push_to_hub()
+
+     def add_cost(self, cost):
+         self.messages.append({"metrics": cost})
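Persistence in `InteractionsLogger` is opt-in: it only activates when both `HF_TOKEN` and `HF_DATASET_REPO_URL` are set, otherwise the logger silently keeps everything in memory. A minimal sketch of enabling it (the values below are placeholders, not real credentials or a real dataset repo):

```python
import os

# Placeholders only; without both variables the logger disables persistence.
os.environ["HF_TOKEN"] = "hf_xxx"
os.environ["HF_DATASET_REPO_URL"] = "https://huggingface.co/datasets/<user>/<repo>"

from autoagents.utils.logger import InteractionsLogger

ial = InteractionsLogger(name="demo", persist=True)
ial.set_goal("example goal")
ial.save()  # pulls the dataset repo, writes ./data/<id>.json files, pushes a commit
```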
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ openai>=0.27.7
+ langchain
+ duckpy
+ huggingface_hub
test.py ADDED
@@ -0,0 +1,49 @@
+ import os
+ import asyncio
+ from autoagents.agents.search import ActionRunner
+ from langchain.callbacks import get_openai_callback
+ from pprint import pprint
+ import pdb
+ from ast import literal_eval
+
+ async def main(user_input):
+     outputq = asyncio.Queue()
+
+     API_O = os.getenv("OPENAI_API_KEY")
+     runner = ActionRunner(outputq, api_key=API_O, model_name="gpt-3.5-turbo")
+     task = asyncio.create_task(runner.run(user_input, outputq))
+
+     while True:
+         output = await outputq.get()
+         if isinstance(output, Exception):
+             print(output)
+             return
+         try:
+             pprint(literal_eval(output))
+         except Exception:
+             print(output)
+         print("-----------------------------------------------------------")
+         if "Final Answer:" in output:
+             break
+     await task
+
+
+ # "list 5 cities and their current populations where Paramore is playing this year.",
+ # "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
+ # "How many watermelons can fit in a Tesla Model S?",
+ # "Recommend me some laptops suitable for UI designers under $2000. Please include brand and price."
+ Q = [
+     "Build me a vacation plan for Rome and Milan this summer for seven days. Include places to visit and hotels to stay at.",
+     "What is the sum of ages of the wives of Barack Obama and Donald Trump?",
+     "Who is the most recent NBA MVP? Which team does he play for? What are his season stats?",
+     "What were the scores for the last three games for the Los Angeles Lakers? Provide the dates and opposing teams.",
+     "Which team won in women's volleyball in the Summer Olympics that was held in London?",
+     "Provide a summary of the latest COVID-19 research paper published. Include the title, authors and abstract.",
+     "What is the top grossing movie in theatres this week? Provide the movie title, director, and a brief synopsis of the movie's plot. Attach a review for this movie.",
+     "Recommend a bagel shop near the Strip District in Pittsburgh that offers vegan food",
+     "Who are some top researchers in the field of machine learning systems nowadays?"
+ ]
+
+ loop = asyncio.new_event_loop()
+ for i in range(len(Q)):
+     loop.run_until_complete(main(Q[i]))